anakib1 committed
Commit c20a42a
1 Parent(s): 8b381a2

Training in progress, epoch 1

config.json ADDED
@@ -0,0 +1,142 @@
+ {
+ "_name_or_path": "openai/whisper-tiny",
+ "activation_dropout": 0.0,
+ "activation_function": "gelu",
+ "apply_spec_augment": false,
+ "architectures": [
+ "WhisperForConditionalGeneration"
+ ],
+ "attention_dropout": 0.0,
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "classifier_proj_size": 256,
+ "d_model": 384,
+ "decoder_attention_heads": 6,
+ "decoder_ffn_dim": 1536,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 4,
+ "decoder_start_token_id": 50258,
+ "dropout": 0.0,
+ "encoder_attention_heads": 6,
+ "encoder_ffn_dim": 1536,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 4,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "max_length": 448,
+ "max_source_positions": 1500,
+ "max_target_positions": 448,
+ "median_filter_width": 7,
+ "model_type": "whisper",
+ "num_hidden_layers": 4,
+ "num_mel_bins": 80,
+ "pad_token_id": 50257,
+ "scale_embedding": false,
+ "suppress_tokens": [
+ 1,
+ 2,
+ 7,
+ 8,
+ 9,
+ 10,
+ 14,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 31,
+ 58,
+ 59,
+ 60,
+ 61,
+ 62,
+ 63,
+ 90,
+ 91,
+ 92,
+ 93,
+ 359,
+ 503,
+ 522,
+ 542,
+ 873,
+ 893,
+ 902,
+ 918,
+ 922,
+ 931,
+ 1350,
+ 1853,
+ 1982,
+ 2460,
+ 2627,
+ 3246,
+ 3253,
+ 3268,
+ 3536,
+ 3846,
+ 3961,
+ 4183,
+ 4667,
+ 6585,
+ 6647,
+ 7273,
+ 9061,
+ 9383,
+ 10428,
+ 10929,
+ 11938,
+ 12033,
+ 12331,
+ 12562,
+ 13793,
+ 14157,
+ 14635,
+ 15265,
+ 15618,
+ 16553,
+ 16604,
+ 18362,
+ 18956,
+ 20075,
+ 21675,
+ 22520,
+ 26130,
+ 26161,
+ 26435,
+ 28279,
+ 29464,
+ 31650,
+ 32302,
+ 32470,
+ 36865,
+ 42863,
+ 47425,
+ 49870,
+ 50254,
+ 50258,
+ 50358,
+ 50359,
+ 50360,
+ 50361,
+ 50362
+ ],
+ "suppressed_tokens": [],
+ "torch_dtype": "float32",
+ "transformers_version": "4.36.2",
+ "use_cache": true,
+ "use_weighted_layer_sum": false,
+ "vocab_size": 51865
+ }
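
The added config.json describes a Whisper encoder-decoder in the tiny configuration: d_model 384, 4 encoder and 4 decoder layers, 6 attention heads, 80 mel bins. As a minimal sketch, the checkpoint pushed by this commit can be loaded with transformers as below; the repo id "anakib1/whisper-tiny" is a placeholder, not confirmed by this commit.

# Minimal loading sketch; "anakib1/whisper-tiny" is a placeholder repo id.
from transformers import WhisperConfig, WhisperForConditionalGeneration

config = WhisperConfig.from_pretrained("anakib1/whisper-tiny")
# Values recorded in the config above:
assert config.d_model == 384 and config.encoder_layers == 4

model = WhisperForConditionalGeneration.from_pretrained("anakib1/whisper-tiny")
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
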
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45e423d4073e8bb8dbaf927226293337c98b18a6a22fa83c6e62c7c753eb033f
+ size 151061672
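
model.safetensors is tracked with Git LFS, so the diff records only a pointer: the spec version, the SHA-256 of the real payload, and its size (151,061,672 bytes, consistent with roughly 37.8 M float32 parameters). A hedged sketch for checking a downloaded copy against the recorded oid; the local file path is an assumption.

# Sketch: recompute the Git LFS oid (SHA-256 of the file contents)
# for a locally downloaded model.safetensors and compare with the pointer.
import hashlib

def lfs_oid(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "45e423d4073e8bb8dbaf927226293337c98b18a6a22fa83c6e62c7c753eb033f"
print(lfs_oid("model.safetensors") == expected)  # local path is an assumption
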
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "chunk_length": 30,
+ "feature_extractor_type": "WhisperFeatureExtractor",
+ "feature_size": 80,
+ "hop_length": 160,
+ "n_fft": 400,
+ "n_samples": 480000,
+ "nb_max_frames": 3000,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "WhisperProcessor",
+ "return_attention_mask": false,
+ "sampling_rate": 22050
+ }
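
preprocessor_config.json describes the WhisperFeatureExtractor: 80-bin log-mel features over 30-second chunks, with a 400-sample FFT and 160-sample hop. Note the declared sampling_rate of 22050 Hz, so audio should be resampled to that rate before extraction. A minimal sketch, constructing the extractor from these values and running it on dummy audio:

# Sketch: build the feature extractor from the values above and run it on
# one second of dummy audio (random noise standing in for speech).
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor(
    feature_size=80,
    sampling_rate=22050,
    hop_length=160,
    chunk_length=30,
    n_fft=400,
    padding_value=0.0,
)
audio = np.random.randn(22050).astype(np.float32)
features = fe(audio, sampling_rate=22050, return_tensors="np")
print(features.input_features.shape)  # (1, 80, frames), padded to 30 s
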
runs/Jan13_09-14-30_d574813ccfe0/events.out.tfevents.1705137647.d574813ccfe0.26.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d12845bdf322e376a2c6558762e5a4070599252cabc2d57c4898f451348a916c
+ size 8010
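
The TensorBoard event file (also an LFS pointer) holds the epoch-1 training logs. One hedged way to inspect it after downloading, using the tensorboard package; the directory path mirrors this repo's runs/ layout and assumes a local copy.

# Sketch: list the scalar tags logged in the downloaded event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan13_09-14-30_d574813ccfe0")  # local copy assumed
acc.Reload()
print(acc.Tags()["scalars"])  # e.g. train/loss, if such scalars were logged
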
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7323fab45a0152ff20c0efc1ea71d6ff22a7c47a2044efed4cf9de0174807f0
+ size 4475
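
training_args.bin is the pickled TrainingArguments object saved by the transformers Trainer. A sketch for unpickling it for inspection; newer torch versions require weights_only=False to load arbitrary pickles, and the matching transformers version (4.36.2 per config.json) should be installed.

# Sketch: unpickle and inspect the saved TrainingArguments.
import torch

args = torch.load("training_args.bin", weights_only=False)  # local copy assumed
print(args.num_train_epochs, args.learning_rate, args.output_dir)
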