aotrih committed on
Commit 14bd68a
1 Parent(s): c430619

whisperkittools-a8c3cdeab8da5d76a7b952aa74ffebfbcd44804b generated files: openai_whisper-tiny.en

openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be7067141d5c13b3a805a77ca5a9ee783328932b9c0add16b28a22fd7963897c
+ oid sha256:8cd50e70541b52e0ac59e73574e4ae6479cc1eac24646b088936894c74fe98b9
  size 354080
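
Each pointer-file change in this commit swaps only the Git LFS object id (the SHA-256 of the tracked file) while the recorded byte size stays the same. As a minimal sketch of what that oid encodes, the Python snippet below hashes a locally downloaded copy of the MelSpectrogram weights and compares it against the updated pointer values above; the local path is an assumption for illustration, not part of this commit.

import hashlib
import os

# Values copied from the updated LFS pointer above.
EXPECTED_OID = "8cd50e70541b52e0ac59e73574e4ae6479cc1eac24646b088936894c74fe98b9"
EXPECTED_SIZE = 354080

# Hypothetical local path to the downloaded artifact.
path = "openai_whisper-tiny.en/MelSpectrogram.mlmodelc/weights/weight.bin"

# Stream the file through SHA-256 (the LFS oid is the SHA-256 of the file contents).
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("weight.bin matches the LFS pointer in this commit")
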
openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9abbd99914ed45a2c3eed96465262d47c0b3cc4d71330f7e5809cb5237ef56ef
+ oid sha256:b34155e7a3c297f790bb5c5975f7f25d22059ee1f289c1c6bd9ffa964c57bfd1
  size 243
openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f5495a1bd59e4b36e553959e230c20df4d7067199412e90f2fa206f4147ea8ec
+ oid sha256:6f0bcfc0ac989fa020e05d1381c0d0514cab88342acb4c52bcac5abd626b15ec
  size 633
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil CHANGED
@@ -65,9 +65,9 @@ program(1.0)
  tensor<fp16, [1, 384, 1, 448]> var_134_cast_fp16 = mul(x = var_54_cast_fp16_0, y = var_129_cast_fp16)[name = tensor<string, []>("op_134_cast_fp16")];
  tensor<fp16, [1, 384, 1, 448]> value_1_cast_fp16 = add(x = var_132_cast_fp16, y = var_134_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")];
  tensor<int32, [4]> var_137 = const()[name = tensor<string, []>("op_137"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_138_cast_fp16 = reshape(shape = var_137, x = query_1_cast_fp16)[name = tensor<string, []>("op_138_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_137, x = query_1_cast_fp16)[name = tensor<string, []>("mh_q_1_cast_fp16")];
  tensor<fp16, []> var_139_to_fp16 = const()[name = tensor<string, []>("op_139_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_140_cast_fp16 = mul(x = var_138_cast_fp16, y = var_139_to_fp16)[name = tensor<string, []>("op_140_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_140_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_139_to_fp16)[name = tensor<string, []>("op_140_cast_fp16")];
  tensor<int32, [4]> var_141 = const()[name = tensor<string, []>("op_141"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 448]> var_142_cast_fp16 = reshape(shape = var_141, x = key_1_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")];
  tensor<bool, []> mh_w_1_transpose_x_0 = const()[name = tensor<string, []>("mh_w_1_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -122,9 +122,9 @@ program(1.0)
  tensor<fp16, [384]> layers_0_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(42248960)))];
  tensor<fp16, [1, 384, 1, 1500]> value_3_cast_fp16 = conv(bias = layers_0_encoder_attn_v_proj_bias_to_fp16, dilations = var_208, groups = var_71, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = var_206, weight = layers_0_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_3_cast_fp16")];
  tensor<int32, [4]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_213_cast_fp16 = reshape(shape = var_212, x = query_3_cast_fp16)[name = tensor<string, []>("op_213_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_212, x = query_3_cast_fp16)[name = tensor<string, []>("mh_q_3_cast_fp16")];
  tensor<fp16, []> var_214_to_fp16 = const()[name = tensor<string, []>("op_214_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_215_cast_fp16 = mul(x = var_213_cast_fp16, y = var_214_to_fp16)[name = tensor<string, []>("op_215_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_215_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_214_to_fp16)[name = tensor<string, []>("op_215_cast_fp16")];
  tensor<int32, [4]> var_216 = const()[name = tensor<string, []>("op_216"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1500]> var_217_cast_fp16 = reshape(shape = var_216, x = key_3_cast_fp16)[name = tensor<string, []>("op_217_cast_fp16")];
  tensor<bool, []> mh_w_5_transpose_x_0 = const()[name = tensor<string, []>("mh_w_5_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -206,9 +206,9 @@ program(1.0)
  tensor<fp16, [1, 384, 1, 448]> var_348_cast_fp16 = mul(x = var_54_cast_fp16_1, y = var_129_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")];
  tensor<fp16, [1, 384, 1, 448]> value_5_cast_fp16 = add(x = var_346_cast_fp16, y = var_348_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")];
  tensor<int32, [4]> var_351 = const()[name = tensor<string, []>("op_351"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_352_cast_fp16 = reshape(shape = var_351, x = query_5_cast_fp16)[name = tensor<string, []>("op_352_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_351, x = query_5_cast_fp16)[name = tensor<string, []>("mh_q_5_cast_fp16")];
  tensor<fp16, []> var_353_to_fp16 = const()[name = tensor<string, []>("op_353_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_354_cast_fp16 = mul(x = var_352_cast_fp16, y = var_353_to_fp16)[name = tensor<string, []>("op_354_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_354_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_353_to_fp16)[name = tensor<string, []>("op_354_cast_fp16")];
  tensor<int32, [4]> var_355 = const()[name = tensor<string, []>("op_355"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 448]> var_356_cast_fp16 = reshape(shape = var_355, x = key_5_cast_fp16)[name = tensor<string, []>("op_356_cast_fp16")];
  tensor<bool, []> mh_w_7_transpose_x_0 = const()[name = tensor<string, []>("mh_w_7_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -259,9 +259,9 @@ program(1.0)
  tensor<fp16, [384]> layers_1_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(46982144)))];
  tensor<fp16, [1, 384, 1, 1500]> value_7_cast_fp16 = conv(bias = layers_1_encoder_attn_v_proj_bias_to_fp16, dilations = var_422, groups = var_285, pad = value_7_pad_0, pad_type = value_7_pad_type_0, strides = var_420, weight = layers_1_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_7_cast_fp16")];
  tensor<int32, [4]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_427_cast_fp16 = reshape(shape = var_426, x = query_7_cast_fp16)[name = tensor<string, []>("op_427_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_7_cast_fp16 = reshape(shape = var_426, x = query_7_cast_fp16)[name = tensor<string, []>("mh_q_7_cast_fp16")];
  tensor<fp16, []> var_428_to_fp16 = const()[name = tensor<string, []>("op_428_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_429_cast_fp16 = mul(x = var_427_cast_fp16, y = var_428_to_fp16)[name = tensor<string, []>("op_429_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_429_cast_fp16 = mul(x = mh_q_7_cast_fp16, y = var_428_to_fp16)[name = tensor<string, []>("op_429_cast_fp16")];
  tensor<int32, [4]> var_430 = const()[name = tensor<string, []>("op_430"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1500]> var_431_cast_fp16 = reshape(shape = var_430, x = key_7_cast_fp16)[name = tensor<string, []>("op_431_cast_fp16")];
  tensor<bool, []> mh_w_11_transpose_x_0 = const()[name = tensor<string, []>("mh_w_11_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -343,9 +343,9 @@ program(1.0)
  tensor<fp16, [1, 384, 1, 448]> var_566_cast_fp16 = mul(x = var_54_cast_fp16_2, y = var_129_cast_fp16)[name = tensor<string, []>("op_566_cast_fp16")];
  tensor<fp16, [1, 384, 1, 448]> value_9_cast_fp16 = add(x = var_564_cast_fp16, y = var_566_cast_fp16)[name = tensor<string, []>("value_9_cast_fp16")];
  tensor<int32, [4]> var_569 = const()[name = tensor<string, []>("op_569"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_570_cast_fp16 = reshape(shape = var_569, x = query_9_cast_fp16)[name = tensor<string, []>("op_570_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_9_cast_fp16 = reshape(shape = var_569, x = query_9_cast_fp16)[name = tensor<string, []>("mh_q_9_cast_fp16")];
  tensor<fp16, []> var_571_to_fp16 = const()[name = tensor<string, []>("op_571_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_572_cast_fp16 = mul(x = var_570_cast_fp16, y = var_571_to_fp16)[name = tensor<string, []>("op_572_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_572_cast_fp16 = mul(x = mh_q_9_cast_fp16, y = var_571_to_fp16)[name = tensor<string, []>("op_572_cast_fp16")];
  tensor<int32, [4]> var_573 = const()[name = tensor<string, []>("op_573"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 448]> var_574_cast_fp16 = reshape(shape = var_573, x = key_9_cast_fp16)[name = tensor<string, []>("op_574_cast_fp16")];
  tensor<bool, []> mh_w_13_transpose_x_0 = const()[name = tensor<string, []>("mh_w_13_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -396,9 +396,9 @@ program(1.0)
  tensor<fp16, [384]> layers_2_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(51715328)))];
  tensor<fp16, [1, 384, 1, 1500]> value_11_cast_fp16 = conv(bias = layers_2_encoder_attn_v_proj_bias_to_fp16, dilations = var_640, groups = var_503, pad = value_11_pad_0, pad_type = value_11_pad_type_0, strides = var_638, weight = layers_2_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_11_cast_fp16")];
  tensor<int32, [4]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_645_cast_fp16 = reshape(shape = var_644, x = query_11_cast_fp16)[name = tensor<string, []>("op_645_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_11_cast_fp16 = reshape(shape = var_644, x = query_11_cast_fp16)[name = tensor<string, []>("mh_q_11_cast_fp16")];
  tensor<fp16, []> var_646_to_fp16 = const()[name = tensor<string, []>("op_646_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_647_cast_fp16 = mul(x = var_645_cast_fp16, y = var_646_to_fp16)[name = tensor<string, []>("op_647_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_647_cast_fp16 = mul(x = mh_q_11_cast_fp16, y = var_646_to_fp16)[name = tensor<string, []>("op_647_cast_fp16")];
  tensor<int32, [4]> var_648 = const()[name = tensor<string, []>("op_648"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1500]> var_649_cast_fp16 = reshape(shape = var_648, x = key_11_cast_fp16)[name = tensor<string, []>("op_649_cast_fp16")];
  tensor<bool, []> mh_w_17_transpose_x_0 = const()[name = tensor<string, []>("mh_w_17_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -480,9 +480,9 @@ program(1.0)
  tensor<fp16, [1, 384, 1, 448]> var_784_cast_fp16 = mul(x = var_54_cast_fp16_3, y = var_129_cast_fp16)[name = tensor<string, []>("op_784_cast_fp16")];
  tensor<fp16, [1, 384, 1, 448]> value_13_cast_fp16 = add(x = var_782_cast_fp16, y = var_784_cast_fp16)[name = tensor<string, []>("value_13_cast_fp16")];
  tensor<int32, [4]> var_787 = const()[name = tensor<string, []>("op_787"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_788_cast_fp16 = reshape(shape = var_787, x = query_13_cast_fp16)[name = tensor<string, []>("op_788_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_13_cast_fp16 = reshape(shape = var_787, x = query_13_cast_fp16)[name = tensor<string, []>("mh_q_13_cast_fp16")];
  tensor<fp16, []> var_789_to_fp16 = const()[name = tensor<string, []>("op_789_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_790_cast_fp16 = mul(x = var_788_cast_fp16, y = var_789_to_fp16)[name = tensor<string, []>("op_790_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_790_cast_fp16 = mul(x = mh_q_13_cast_fp16, y = var_789_to_fp16)[name = tensor<string, []>("op_790_cast_fp16")];
  tensor<int32, [4]> var_791 = const()[name = tensor<string, []>("op_791"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 448]> var_792_cast_fp16 = reshape(shape = var_791, x = key_13_cast_fp16)[name = tensor<string, []>("op_792_cast_fp16")];
  tensor<bool, []> mh_w_19_transpose_x_0 = const()[name = tensor<string, []>("mh_w_19_transpose_x_0"), val = tensor<bool, []>(true)];
@@ -533,9 +533,9 @@ program(1.0)
  tensor<fp16, [384]> layers_3_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56448512)))];
  tensor<fp16, [1, 384, 1, 1500]> value_cast_fp16 = conv(bias = layers_3_encoder_attn_v_proj_bias_to_fp16, dilations = var_858, groups = var_721, pad = value_pad_0, pad_type = value_pad_type_0, strides = var_856, weight = layers_3_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_cast_fp16")];
  tensor<int32, [4]> var_862 = const()[name = tensor<string, []>("op_862"), val = tensor<int32, [4]>([1, 6, 64, -1])];
- tensor<fp16, [1, 6, 64, 1]> var_863_cast_fp16 = reshape(shape = var_862, x = query_cast_fp16)[name = tensor<string, []>("op_863_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_862, x = query_cast_fp16)[name = tensor<string, []>("mh_q_cast_fp16")];
  tensor<fp16, []> var_864_to_fp16 = const()[name = tensor<string, []>("op_864_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
- tensor<fp16, [1, 6, 64, 1]> var_865_cast_fp16 = mul(x = var_863_cast_fp16, y = var_864_to_fp16)[name = tensor<string, []>("op_865_cast_fp16")];
+ tensor<fp16, [1, 6, 64, 1]> var_865_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_864_to_fp16)[name = tensor<string, []>("op_865_cast_fp16")];
  tensor<int32, [4]> var_866 = const()[name = tensor<string, []>("op_866"), val = tensor<int32, [4]>([1, 6, 64, -1])];
  tensor<fp16, [1, 6, 64, 1500]> var_867_cast_fp16 = reshape(shape = var_866, x = key_cast_fp16)[name = tensor<string, []>("op_867_cast_fp16")];
  tensor<bool, []> mh_w_transpose_x_0 = const()[name = tensor<string, []>("mh_w_transpose_x_0"), val = tensor<bool, []>(true)];
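
The model.mil hunks above are a renaming pass: in each attention block, the reshaped query tensor changes from an anonymous op_* output (e.g. var_138_cast_fp16) to an mh_q_* name, and the following mul now reads from the renamed tensor; shapes, the 0x1p-3 scale constant, and weight offsets are unchanged. Numerically, each reshape/mul pair splits the 384-channel query into 6 heads of 64 dims and scales it by 0x1p-3 (0.125, i.e. 1/sqrt(64), the usual per-head attention scale). A minimal NumPy sketch of that computation, with a random dummy array standing in for query_1_cast_fp16:

import numpy as np

# Dummy stand-in for query_1_cast_fp16: one decoded token, 384 channels, fp16.
query = np.random.randn(1, 384, 1, 1).astype(np.float16)

# reshape(shape = [1, 6, 64, -1]) -> mh_q_1_cast_fp16: split 384 channels into 6 heads of 64 dims.
mh_q = query.reshape(1, 6, 64, -1)

# mul by the 0x1p-3 constant (0.125 == 1/sqrt(64)): the per-head attention scale.
scaled_q = mh_q * np.float16(0.125)

print(scaled_q.shape)  # (1, 6, 64, 1), matching var_140_cast_fp16 in the first hunk
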
openai_whisper-tiny.en/TextDecoder.mlmodelc/weights/weight.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a18097f2baf8507103ef5319af13ffea77cfa8ee5170e01cee84a63336b65fd7
+ oid sha256:0d166727073ff54e03d3b5118767d30572968d529ce81521ac1606181113f71d
  size 59215664