aotrih committed
Commit 3a08276
1 Parent(s): 6adff3d

whisperkittools-b1f8009e0453602564324de26c2e8963d22bc009 generated files: openai_whisper-tiny.en

openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d5dd3d07ebd9f09e1f4d2cb5e723b7507370a2239cdded9a5375628ed7e1be88
+ oid sha256:c0f1d97b0c5ae23b768c81c378c9e01d81b8ecbcf8107f383c68c776ac4959ef
  size 16422784
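The weight and coremldata blobs in this commit are Git LFS pointer files, so regenerating the model only changes the sha256 oid while the byte size stays the same. To confirm that a locally fetched weight.bin matches the updated pointer, a minimal sketch like the following works (the path is illustrative and assumes the blob has already been materialized, e.g. with `git lfs pull`):

    import hashlib

    def verify_lfs_blob(blob_path, expected_oid, expected_size):
        # Hash the blob in chunks so large weight files need not fit in memory.
        digest = hashlib.sha256()
        size = 0
        with open(blob_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
                size += len(chunk)
        return digest.hexdigest() == expected_oid and size == expected_size

    # Values copied from the updated AudioEncoder pointer above.
    print(verify_lfs_blob(
        "openai_whisper-tiny.en/AudioEncoder.mlmodelc/weights/weight.bin",
        "c0f1d97b0c5ae23b768c81c378c9e01d81b8ecbcf8107f383c68c776ac4959ef",
        16422784,
    ))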
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dabdc5aa69f6ef4d97dc9499f5c30514e00e96b53b750b33a5a6471363c71662
+ oid sha256:3d62993c963bf179f8a2307d6480208d56ce71fc63877a0b11eacbf15ef6e1c6
  size 328
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json CHANGED
@@ -50,8 +50,8 @@
  },
  "userDefinedMetadata" : {
  "com.github.apple.coremltools.source_dialect" : "TorchScript",
- "com.github.apple.coremltools.version" : "7.1",
- "com.github.apple.coremltools.source" : "torch==2.2.1"
+ "com.github.apple.coremltools.source" : "torch==2.2.1",
+ "com.github.apple.coremltools.version" : "7.1"
  },
  "inputSchema" : [
  {
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45634c808d510c6cae6cc148354133aecc437612fe27d8df346f37ae302503eb
+ oid sha256:49d43a9ef39b3c743b66296f0ecc6d9a3dcd7fdba164a64fb04087adfbc3c947
  size 354080
openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e18966f107bc33b55e2b449982028367637f37865b0775ae429334b3bc13160
+ oid sha256:8a3e0bfa2afd1214f7331cf018446b178cac5d9435b62d6d8a137aa774db6bd6
  size 243
openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c3d9ad3d15525ca0eabf507e4ea88fb19b6d8d5d09191c8eaddffe82c8042959
+ oid sha256:c220318d8d76d90618a478848e19a2ef820264b3e9e78d4da479851f7da1e2d2
  size 633
openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json CHANGED
@@ -84,8 +84,8 @@
  },
  "userDefinedMetadata" : {
  "com.github.apple.coremltools.source_dialect" : "TorchScript",
- "com.github.apple.coremltools.source" : "torch==2.2.1",
- "com.github.apple.coremltools.version" : "7.1"
+ "com.github.apple.coremltools.version" : "7.1",
+ "com.github.apple.coremltools.source" : "torch==2.2.1"
  },
  "inputSchema" : [
  {
@@ -112,9 +112,9 @@
  "hasShapeFlexibility" : "0",
  "isOptional" : "0",
  "dataType" : "Float16",
- "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 224)",
+ "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 448)",
  "shortDescription" : "",
- "shape" : "[1, 1536, 1, 224]",
+ "shape" : "[1, 1536, 1, 448]",
  "name" : "key_cache",
  "type" : "MultiArray"
  },
@@ -122,9 +122,9 @@
  "hasShapeFlexibility" : "0",
  "isOptional" : "0",
  "dataType" : "Float16",
- "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 224)",
+ "formattedType" : "MultiArray (Float16 1 × 1536 × 1 × 448)",
  "shortDescription" : "",
- "shape" : "[1, 1536, 1, 224]",
+ "shape" : "[1, 1536, 1, 448]",
  "name" : "value_cache",
  "type" : "MultiArray"
  },
@@ -132,9 +132,9 @@
  "hasShapeFlexibility" : "0",
  "isOptional" : "0",
  "dataType" : "Float16",
- "formattedType" : "MultiArray (Float16 1 × 224)",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
  "shortDescription" : "",
- "shape" : "[1, 224]",
+ "shape" : "[1, 448]",
  "name" : "kv_cache_update_mask",
  "type" : "MultiArray"
  },
@@ -152,9 +152,9 @@
  "hasShapeFlexibility" : "0",
  "isOptional" : "0",
  "dataType" : "Float16",
- "formattedType" : "MultiArray (Float16 1 × 224)",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
  "shortDescription" : "",
- "shape" : "[1, 224]",
+ "shape" : "[1, 448]",
  "name" : "decoder_key_padding_mask",
  "type" : "MultiArray"
  }
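The substantive change in this metadata is that key_cache, value_cache, kv_cache_update_mask, and decoder_key_padding_mask now advertise a 448-position decoder context instead of 224 (448 is Whisper's maximum decoding length). A small sketch to check a local copy, assuming the illustrative path below and that metadata.json may be either a single object or a one-element list depending on the toolchain:

    import json

    # Path is illustrative; point it at your local TextDecoder.mlmodelc bundle.
    with open("openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json") as f:
        metadata = json.load(f)

    entry = metadata[0] if isinstance(metadata, list) else metadata
    cache_inputs = {"key_cache", "value_cache", "kv_cache_update_mask", "decoder_key_padding_mask"}

    for spec in entry["inputSchema"]:
        if spec["name"] in cache_inputs:
            # After this commit the shapes should end in 448 rather than 224.
            print(spec["name"], spec["dataType"], spec["shape"])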
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil CHANGED
@@ -1,7 +1,7 @@
  program(1.0)
  [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
  {
- func main<ios16>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 224]> decoder_key_padding_mask, tensor<fp16, [1, 384, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 1536, 1, 224]> key_cache, tensor<fp16, [1, 224]> kv_cache_update_mask, tensor<fp16, [1, 1536, 1, 224]> value_cache) {
+ func main<ios16>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, tensor<fp16, [1, 384, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 1536, 1, 448]> key_cache, tensor<fp16, [1, 448]> kv_cache_update_mask, tensor<fp16, [1, 1536, 1, 448]> value_cache) {
  tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)];
  tensor<int32, []> var_24_batch_dims_0 = const()[name = tensor<string, []>("op_24_batch_dims_0"), val = tensor<int32, []>(0)];
  tensor<fp16, [51864, 384]> embed_tokens_weight_to_fp16 = const()[name = tensor<string, []>("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51864, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
@@ -17,10 +17,10 @@ program(1.0)
  tensor<fp16, [1, 384, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_42_cast_fp16)[name = tensor<string, []>("inputs_1_cast_fp16")];
  tensor<int32, [4]> tile_0 = const()[name = tensor<string, []>("tile_0"), val = tensor<int32, [4]>([384, 384, 384, 384])];
  tensor<int32, []> var_47_axis_0 = const()[name = tensor<string, []>("op_47_axis_0"), val = tensor<int32, []>(1)];
- tensor<fp16, [1, 384, 1, 224]> var_47_cast_fp16_0, tensor<fp16, [1, 384, 1, 224]> var_47_cast_fp16_1, tensor<fp16, [1, 384, 1, 224]> var_47_cast_fp16_2, tensor<fp16, [1, 384, 1, 224]> var_47_cast_fp16_3 = split(axis = var_47_axis_0, split_sizes = tile_0, x = key_cache)[name = tensor<string, []>("op_47_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_0, tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_1, tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_2, tensor<fp16, [1, 384, 1, 448]> var_47_cast_fp16_3 = split(axis = var_47_axis_0, split_sizes = tile_0, x = key_cache)[name = tensor<string, []>("op_47_cast_fp16")];
  tensor<int32, [4]> tile_1 = const()[name = tensor<string, []>("tile_1"), val = tensor<int32, [4]>([384, 384, 384, 384])];
  tensor<int32, []> var_54_axis_0 = const()[name = tensor<string, []>("op_54_axis_0"), val = tensor<int32, []>(1)];
- tensor<fp16, [1, 384, 1, 224]> var_54_cast_fp16_0, tensor<fp16, [1, 384, 1, 224]> var_54_cast_fp16_1, tensor<fp16, [1, 384, 1, 224]> var_54_cast_fp16_2, tensor<fp16, [1, 384, 1, 224]> var_54_cast_fp16_3 = split(axis = var_54_axis_0, split_sizes = tile_1, x = value_cache)[name = tensor<string, []>("op_54_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_0, tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_1, tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_2, tensor<fp16, [1, 384, 1, 448]> var_54_cast_fp16_3 = split(axis = var_54_axis_0, split_sizes = tile_1, x = value_cache)[name = tensor<string, []>("op_54_cast_fp16")];
  tensor<int32, []> var_64 = const()[name = tensor<string, []>("op_64"), val = tensor<int32, []>(3)];
  tensor<int32, []> var_71 = const()[name = tensor<string, []>("op_71"), val = tensor<int32, []>(1)];
  tensor<bool, []> var_72 = const()[name = tensor<string, []>("op_72"), val = tensor<bool, []>(true)];
@@ -62,34 +62,34 @@ program(1.0)
  tensor<fp16, [384]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41064896)))];
  tensor<fp16, [1, 384, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = var_121, groups = var_71, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = var_119, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_value_1_cast_fp16")];
  tensor<int32, [1]> var_125_axes_0 = const()[name = tensor<string, []>("op_125_axes_0"), val = tensor<int32, [1]>([1])];
- tensor<fp16, [1, 1, 224]> var_125_cast_fp16 = expand_dims(axes = var_125_axes_0, x = kv_cache_update_mask)[name = tensor<string, []>("op_125_cast_fp16")];
+ tensor<fp16, [1, 1, 448]> var_125_cast_fp16 = expand_dims(axes = var_125_axes_0, x = kv_cache_update_mask)[name = tensor<string, []>("op_125_cast_fp16")];
  tensor<int32, [1]> var_126_axes_0 = const()[name = tensor<string, []>("op_126_axes_0"), val = tensor<int32, [1]>([2])];
- tensor<fp16, [1, 1, 1, 224]> var_126_cast_fp16 = expand_dims(axes = var_126_axes_0, x = var_125_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_128_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_128_cast_fp16")];
+ tensor<fp16, [1, 1, 1, 448]> var_126_cast_fp16 = expand_dims(axes = var_126_axes_0, x = var_125_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_128_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_128_cast_fp16")];
  tensor<fp16, []> var_65_to_fp16 = const()[name = tensor<string, []>("op_65_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
- tensor<fp16, [1, 1, 1, 224]> var_129_cast_fp16 = sub(x = var_65_to_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_130_cast_fp16 = mul(x = var_47_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_130_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> key_1_cast_fp16 = add(x = var_128_cast_fp16, y = var_130_cast_fp16)[name = tensor<string, []>("key_1_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_132_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_132_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_134_cast_fp16 = mul(x = var_54_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_134_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> value_1_cast_fp16 = add(x = var_132_cast_fp16, y = var_134_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")];
+ tensor<fp16, [1, 1, 1, 448]> var_129_cast_fp16 = sub(x = var_65_to_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_130_cast_fp16 = mul(x = var_47_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_130_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> key_1_cast_fp16 = add(x = var_128_cast_fp16, y = var_130_cast_fp16)[name = tensor<string, []>("key_1_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_132_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_132_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_134_cast_fp16 = mul(x = var_54_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_134_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> value_1_cast_fp16 = add(x = var_132_cast_fp16, y = var_134_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")];
  tensor<int32, [4]> var_137 = const()[name = tensor<string, []>("op_137"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1]> var_138_cast_fp16 = reshape(shape = var_137, x = query_1_cast_fp16)[name = tensor<string, []>("op_138_cast_fp16")];
  tensor<fp16, []> var_139_to_fp16 = const()[name = tensor<string, []>("op_139_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
  tensor<fp16, [1, 6, 64, 1]> var_140_cast_fp16 = mul(x = var_138_cast_fp16, y = var_139_to_fp16)[name = tensor<string, []>("op_140_cast_fp16")];
  tensor<int32, [4]> var_141 = const()[name = tensor<string, []>("op_141"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_142_cast_fp16 = reshape(shape = var_141, x = key_1_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_142_cast_fp16 = reshape(shape = var_141, x = key_1_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")];
  tensor<bool, []> mh_w_1_transpose_x_0 = const()[name = tensor<string, []>("mh_w_1_transpose_x_0"), val = tensor<bool, []>(true)];
  tensor<bool, []> mh_w_1_transpose_y_0 = const()[name = tensor<string, []>("mh_w_1_transpose_y_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [1, 6, 1, 224]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_140_cast_fp16, y = var_142_cast_fp16)[name = tensor<string, []>("mh_w_1_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_140_cast_fp16, y = var_142_cast_fp16)[name = tensor<string, []>("mh_w_1_cast_fp16")];
  tensor<int32, [1]> var_146_axes_0 = const()[name = tensor<string, []>("op_146_axes_0"), val = tensor<int32, [1]>([1])];
- tensor<fp16, [1, 1, 224]> var_146_cast_fp16 = expand_dims(axes = var_146_axes_0, x = decoder_key_padding_mask)[name = tensor<string, []>("op_146_cast_fp16")];
+ tensor<fp16, [1, 1, 448]> var_146_cast_fp16 = expand_dims(axes = var_146_axes_0, x = decoder_key_padding_mask)[name = tensor<string, []>("op_146_cast_fp16")];
  tensor<int32, [1]> var_147_axes_0 = const()[name = tensor<string, []>("op_147_axes_0"), val = tensor<int32, [1]>([2])];
- tensor<fp16, [1, 1, 1, 224]> var_147_cast_fp16 = expand_dims(axes = var_147_axes_0, x = var_146_cast_fp16)[name = tensor<string, []>("op_147_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_3_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> var_150_cast_fp16 = softmax(axis = var_64, x = mh_w_3_cast_fp16)[name = tensor<string, []>("op_150_cast_fp16")];
+ tensor<fp16, [1, 1, 1, 448]> var_147_cast_fp16 = expand_dims(axes = var_147_axes_0, x = var_146_cast_fp16)[name = tensor<string, []>("op_147_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_3_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> var_150_cast_fp16 = softmax(axis = var_64, x = mh_w_3_cast_fp16)[name = tensor<string, []>("op_150_cast_fp16")];
  tensor<int32, [4]> var_151 = const()[name = tensor<string, []>("op_151"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_152_cast_fp16 = reshape(shape = var_151, x = value_1_cast_fp16)[name = tensor<string, []>("op_152_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_152_cast_fp16 = reshape(shape = var_151, x = value_1_cast_fp16)[name = tensor<string, []>("op_152_cast_fp16")];
  tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
  tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
  tensor<fp16, [1, 6, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_152_cast_fp16, y = var_150_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
@@ -233,25 +233,25 @@ program(1.0)
  tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45503104)))];
  tensor<fp16, [384]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(45798080)))];
  tensor<fp16, [1, 384, 1, 1]> current_value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = var_335, groups = var_285, pad = current_value_3_pad_0, pad_type = current_value_3_pad_type_0, strides = var_333, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_value_3_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_342_cast_fp16 = mul(x = current_key_3_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_342_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_344_cast_fp16 = mul(x = var_47_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_344_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> key_5_cast_fp16 = add(x = var_342_cast_fp16, y = var_344_cast_fp16)[name = tensor<string, []>("key_5_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_346_cast_fp16 = mul(x = current_value_3_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_346_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_348_cast_fp16 = mul(x = var_54_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> value_5_cast_fp16 = add(x = var_346_cast_fp16, y = var_348_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_342_cast_fp16 = mul(x = current_key_3_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_342_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_344_cast_fp16 = mul(x = var_47_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_344_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> key_5_cast_fp16 = add(x = var_342_cast_fp16, y = var_344_cast_fp16)[name = tensor<string, []>("key_5_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_346_cast_fp16 = mul(x = current_value_3_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_346_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_348_cast_fp16 = mul(x = var_54_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> value_5_cast_fp16 = add(x = var_346_cast_fp16, y = var_348_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")];
  tensor<int32, [4]> var_351 = const()[name = tensor<string, []>("op_351"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1]> var_352_cast_fp16 = reshape(shape = var_351, x = query_5_cast_fp16)[name = tensor<string, []>("op_352_cast_fp16")];
  tensor<fp16, []> var_353_to_fp16 = const()[name = tensor<string, []>("op_353_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
  tensor<fp16, [1, 6, 64, 1]> var_354_cast_fp16 = mul(x = var_352_cast_fp16, y = var_353_to_fp16)[name = tensor<string, []>("op_354_cast_fp16")];
  tensor<int32, [4]> var_355 = const()[name = tensor<string, []>("op_355"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_356_cast_fp16 = reshape(shape = var_355, x = key_5_cast_fp16)[name = tensor<string, []>("op_356_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_356_cast_fp16 = reshape(shape = var_355, x = key_5_cast_fp16)[name = tensor<string, []>("op_356_cast_fp16")];
  tensor<bool, []> mh_w_7_transpose_x_0 = const()[name = tensor<string, []>("mh_w_7_transpose_x_0"), val = tensor<bool, []>(true)];
  tensor<bool, []> mh_w_7_transpose_y_0 = const()[name = tensor<string, []>("mh_w_7_transpose_y_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [1, 6, 1, 224]> mh_w_7_cast_fp16 = matmul(transpose_x = mh_w_7_transpose_x_0, transpose_y = mh_w_7_transpose_y_0, x = var_354_cast_fp16, y = var_356_cast_fp16)[name = tensor<string, []>("mh_w_7_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> mh_w_9_cast_fp16 = add(x = mh_w_7_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_9_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> var_364_cast_fp16 = softmax(axis = var_278, x = mh_w_9_cast_fp16)[name = tensor<string, []>("op_364_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_7_cast_fp16 = matmul(transpose_x = mh_w_7_transpose_x_0, transpose_y = mh_w_7_transpose_y_0, x = var_354_cast_fp16, y = var_356_cast_fp16)[name = tensor<string, []>("mh_w_7_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_9_cast_fp16 = add(x = mh_w_7_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_9_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> var_364_cast_fp16 = softmax(axis = var_278, x = mh_w_9_cast_fp16)[name = tensor<string, []>("op_364_cast_fp16")];
  tensor<int32, [4]> var_365 = const()[name = tensor<string, []>("op_365"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_366_cast_fp16 = reshape(shape = var_365, x = value_5_cast_fp16)[name = tensor<string, []>("op_366_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_366_cast_fp16 = reshape(shape = var_365, x = value_5_cast_fp16)[name = tensor<string, []>("op_366_cast_fp16")];
  tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
  tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
  tensor<fp16, [1, 6, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_366_cast_fp16, y = var_364_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
@@ -395,25 +395,25 @@ program(1.0)
  tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50236288)))];
  tensor<fp16, [384]> layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50531264)))];
  tensor<fp16, [1, 384, 1, 1]> current_value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = var_553, groups = var_503, pad = current_value_5_pad_0, pad_type = current_value_5_pad_type_0, strides = var_551, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor<string, []>("current_value_5_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_560_cast_fp16 = mul(x = current_key_5_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_560_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_562_cast_fp16 = mul(x = var_47_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_562_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> key_9_cast_fp16 = add(x = var_560_cast_fp16, y = var_562_cast_fp16)[name = tensor<string, []>("key_9_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_564_cast_fp16 = mul(x = current_value_5_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_564_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_566_cast_fp16 = mul(x = var_54_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_566_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> value_9_cast_fp16 = add(x = var_564_cast_fp16, y = var_566_cast_fp16)[name = tensor<string, []>("value_9_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_560_cast_fp16 = mul(x = current_key_5_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_560_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_562_cast_fp16 = mul(x = var_47_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_562_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> key_9_cast_fp16 = add(x = var_560_cast_fp16, y = var_562_cast_fp16)[name = tensor<string, []>("key_9_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_564_cast_fp16 = mul(x = current_value_5_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_564_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_566_cast_fp16 = mul(x = var_54_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_566_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> value_9_cast_fp16 = add(x = var_564_cast_fp16, y = var_566_cast_fp16)[name = tensor<string, []>("value_9_cast_fp16")];
  tensor<int32, [4]> var_569 = const()[name = tensor<string, []>("op_569"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1]> var_570_cast_fp16 = reshape(shape = var_569, x = query_9_cast_fp16)[name = tensor<string, []>("op_570_cast_fp16")];
  tensor<fp16, []> var_571_to_fp16 = const()[name = tensor<string, []>("op_571_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
  tensor<fp16, [1, 6, 64, 1]> var_572_cast_fp16 = mul(x = var_570_cast_fp16, y = var_571_to_fp16)[name = tensor<string, []>("op_572_cast_fp16")];
  tensor<int32, [4]> var_573 = const()[name = tensor<string, []>("op_573"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_574_cast_fp16 = reshape(shape = var_573, x = key_9_cast_fp16)[name = tensor<string, []>("op_574_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_574_cast_fp16 = reshape(shape = var_573, x = key_9_cast_fp16)[name = tensor<string, []>("op_574_cast_fp16")];
  tensor<bool, []> mh_w_13_transpose_x_0 = const()[name = tensor<string, []>("mh_w_13_transpose_x_0"), val = tensor<bool, []>(true)];
  tensor<bool, []> mh_w_13_transpose_y_0 = const()[name = tensor<string, []>("mh_w_13_transpose_y_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [1, 6, 1, 224]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_572_cast_fp16, y = var_574_cast_fp16)[name = tensor<string, []>("mh_w_13_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> mh_w_15_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_15_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> var_582_cast_fp16 = softmax(axis = var_496, x = mh_w_15_cast_fp16)[name = tensor<string, []>("op_582_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_572_cast_fp16, y = var_574_cast_fp16)[name = tensor<string, []>("mh_w_13_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_15_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_15_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> var_582_cast_fp16 = softmax(axis = var_496, x = mh_w_15_cast_fp16)[name = tensor<string, []>("op_582_cast_fp16")];
  tensor<int32, [4]> var_583 = const()[name = tensor<string, []>("op_583"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_584_cast_fp16 = reshape(shape = var_583, x = value_9_cast_fp16)[name = tensor<string, []>("op_584_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_584_cast_fp16 = reshape(shape = var_583, x = value_9_cast_fp16)[name = tensor<string, []>("op_584_cast_fp16")];
  tensor<bool, []> attn_9_transpose_x_0 = const()[name = tensor<string, []>("attn_9_transpose_x_0"), val = tensor<bool, []>(false)];
  tensor<bool, []> attn_9_transpose_y_0 = const()[name = tensor<string, []>("attn_9_transpose_y_0"), val = tensor<bool, []>(true)];
  tensor<fp16, [1, 6, 64, 1]> attn_9_cast_fp16 = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_584_cast_fp16, y = var_582_cast_fp16)[name = tensor<string, []>("attn_9_cast_fp16")];
@@ -557,25 +557,25 @@ program(1.0)
  tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(54969472)))];
  tensor<fp16, [384]> layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(55264448)))];
  tensor<fp16, [1, 384, 1, 1]> current_value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = var_771, groups = var_721, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = var_769, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor<string, []>("current_value_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_778_cast_fp16 = mul(x = current_key_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_778_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_780_cast_fp16 = mul(x = var_47_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_780_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> key_13_cast_fp16 = add(x = var_778_cast_fp16, y = var_780_cast_fp16)[name = tensor<string, []>("key_13_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_782_cast_fp16 = mul(x = current_value_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_782_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> var_784_cast_fp16 = mul(x = var_54_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_784_cast_fp16")];
- tensor<fp16, [1, 384, 1, 224]> value_13_cast_fp16 = add(x = var_782_cast_fp16, y = var_784_cast_fp16)[name = tensor<string, []>("value_13_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_778_cast_fp16 = mul(x = current_key_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_778_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_780_cast_fp16 = mul(x = var_47_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_780_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> key_13_cast_fp16 = add(x = var_778_cast_fp16, y = var_780_cast_fp16)[name = tensor<string, []>("key_13_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_782_cast_fp16 = mul(x = current_value_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("op_782_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> var_784_cast_fp16 = mul(x = var_54_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_784_cast_fp16")];
+ tensor<fp16, [1, 384, 1, 448]> value_13_cast_fp16 = add(x = var_782_cast_fp16, y = var_784_cast_fp16)[name = tensor<string, []>("value_13_cast_fp16")];
  tensor<int32, [4]> var_787 = const()[name = tensor<string, []>("op_787"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1]> var_788_cast_fp16 = reshape(shape = var_787, x = query_13_cast_fp16)[name = tensor<string, []>("op_788_cast_fp16")];
  tensor<fp16, []> var_789_to_fp16 = const()[name = tensor<string, []>("op_789_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
  tensor<fp16, [1, 6, 64, 1]> var_790_cast_fp16 = mul(x = var_788_cast_fp16, y = var_789_to_fp16)[name = tensor<string, []>("op_790_cast_fp16")];
  tensor<int32, [4]> var_791 = const()[name = tensor<string, []>("op_791"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_792_cast_fp16 = reshape(shape = var_791, x = key_13_cast_fp16)[name = tensor<string, []>("op_792_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_792_cast_fp16 = reshape(shape = var_791, x = key_13_cast_fp16)[name = tensor<string, []>("op_792_cast_fp16")];
  tensor<bool, []> mh_w_19_transpose_x_0 = const()[name = tensor<string, []>("mh_w_19_transpose_x_0"), val = tensor<bool, []>(true)];
  tensor<bool, []> mh_w_19_transpose_y_0 = const()[name = tensor<string, []>("mh_w_19_transpose_y_0"), val = tensor<bool, []>(false)];
- tensor<fp16, [1, 6, 1, 224]> mh_w_19_cast_fp16 = matmul(transpose_x = mh_w_19_transpose_x_0, transpose_y = mh_w_19_transpose_y_0, x = var_790_cast_fp16, y = var_792_cast_fp16)[name = tensor<string, []>("mh_w_19_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> mh_w_21_cast_fp16 = add(x = mh_w_19_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_21_cast_fp16")];
- tensor<fp16, [1, 6, 1, 224]> var_800_cast_fp16 = softmax(axis = var_714, x = mh_w_21_cast_fp16)[name = tensor<string, []>("op_800_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_19_cast_fp16 = matmul(transpose_x = mh_w_19_transpose_x_0, transpose_y = mh_w_19_transpose_y_0, x = var_790_cast_fp16, y = var_792_cast_fp16)[name = tensor<string, []>("mh_w_19_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> mh_w_21_cast_fp16 = add(x = mh_w_19_cast_fp16, y = var_147_cast_fp16)[name = tensor<string, []>("mh_w_21_cast_fp16")];
+ tensor<fp16, [1, 6, 1, 448]> var_800_cast_fp16 = softmax(axis = var_714, x = mh_w_21_cast_fp16)[name = tensor<string, []>("op_800_cast_fp16")];
  tensor<int32, [4]> var_801 = const()[name = tensor<string, []>("op_801"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 224]> var_802_cast_fp16 = reshape(shape = var_801, x = value_13_cast_fp16)[name = tensor<string, []>("op_802_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 448]> var_802_cast_fp16 = reshape(shape = var_801, x = value_13_cast_fp16)[name = tensor<string, []>("op_802_cast_fp16")];
  tensor<bool, []> attn_13_transpose_x_0 = const()[name = tensor<string, []>("attn_13_transpose_x_0"), val = tensor<bool, []>(false)];
  tensor<bool, []> attn_13_transpose_y_0 = const()[name = tensor<string, []>("attn_13_transpose_y_0"), val = tensor<bool, []>(true)];
  tensor<fp16, [1, 6, 64, 1]> attn_13_cast_fp16 = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_802_cast_fp16, y = var_800_cast_fp16)[name = tensor<string, []>("attn_13_cast_fp16")];
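Every dimension in model.mil tied to the decoder cache moves from 224 to 448; nothing else in the program changes. For reference, a hedged sketch of host-side buffers matching the shapes and dtypes the new main() signature expects (how these are actually fed to the compiled model, e.g. through WhisperKit or a Core ML runtime, is outside this diff):

    import numpy as np

    CTX = 448  # decoder cache length after this commit (previously 224)

    decoder_inputs = {
        "input_ids": np.zeros((1,), dtype=np.int32),
        "cache_length": np.zeros((1,), dtype=np.int32),
        "key_cache": np.zeros((1, 1536, 1, CTX), dtype=np.float16),
        "value_cache": np.zeros((1, 1536, 1, CTX), dtype=np.float16),
        "kv_cache_update_mask": np.zeros((1, CTX), dtype=np.float16),
        "decoder_key_padding_mask": np.zeros((1, CTX), dtype=np.float16),
        "encoder_output_embeds": np.zeros((1, 384, 1, 1500), dtype=np.float16),
    }

    for name, array in decoder_inputs.items():
        print(name, array.shape, array.dtype)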
openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e647a216be345d47805bcd85dfae64840f154cfbb4c4f1d1b07da6a2dd8c72c
+ oid sha256:47002a1763930c674d91ee5de05440b73bdee3fe6e38be0934666a1bea414ca8
  size 59215664