Training in progress, step 1000

Files changed:
- config.json (+13 -6)
- model.safetensors (+3 -0)
- preprocessor_config.json (+0 -0)
- requirements.txt (+3 -2)
- run.sh (+3 -3)
- run_speech_recognition_seq2seq_streaming.py (+5 -4)
- tokenizer.json (+0 -0)
- tokenizer_config.json (+4 -34)
- training_args.bin (+2 -2)
- vocab.json (+0 -0)
config.json
CHANGED
@@ -2,15 +2,14 @@
   "_name_or_path": "openai/whisper-medium",
   "activation_dropout": 0.0,
   "activation_function": "gelu",
+  "apply_spec_augment": false,
   "architectures": [
     "WhisperForConditionalGeneration"
   ],
   "attention_dropout": 0.0,
-  "begin_suppress_tokens": [
-    220,
-    50257
-  ],
+  "begin_suppress_tokens": null,
   "bos_token_id": 50257,
+  "classifier_proj_size": 256,
   "d_model": 1024,
   "decoder_attention_heads": 16,
   "decoder_ffn_dim": 4096,
@@ -26,16 +25,24 @@
   "forced_decoder_ids": null,
   "init_std": 0.02,
   "is_encoder_decoder": true,
-  "
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.05,
+  "max_length": null,
   "max_source_positions": 1500,
   "max_target_positions": 448,
+  "median_filter_width": 7,
   "model_type": "whisper",
   "num_hidden_layers": 24,
   "num_mel_bins": 80,
   "pad_token_id": 50257,
   "scale_embedding": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.46.0.dev0",
   "use_cache": false,
+  "use_weighted_layer_sum": false,
   "vocab_size": 51865
 }
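The added `apply_spec_augment` and `mask_*` entries are the SpecAugment options that newer `transformers` releases expose on `WhisperConfig`; this commit records them at their defaults, with augmentation disabled. A minimal sketch of how those fields could be enabled for fine-tuning — the checkpoint name is the upstream base model, and turning the flag on is an assumption, not something this diff does:

```python
from transformers import WhisperConfig, WhisperForConditionalGeneration

# Sketch only: load the base config and switch SpecAugment on, using the
# masking hyperparameters that appear in the updated config.json above.
config = WhisperConfig.from_pretrained("openai/whisper-medium")
config.apply_spec_augment = True    # the diff leaves this at false
config.mask_time_prob = 0.05        # fraction of time steps chosen as mask starts
config.mask_time_length = 10        # span length of each time mask
config.mask_time_min_masks = 2
config.mask_feature_prob = 0.0      # feature (mel-bin) masking stays disabled
config.mask_feature_length = 10
config.mask_feature_min_masks = 0

model = WhisperForConditionalGeneration.from_pretrained(
    "openai/whisper-medium", config=config
)
```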
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89f033c6a1523e7a92a9f2bf559c6505550a389af90b896f6d72c7908da8cd6f
+size 3055544304
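The repository stores model.safetensors through Git LFS, so the tracked file is just a pointer: `oid` is the SHA-256 digest of the actual payload and `size` is its length in bytes. A small sketch for checking a downloaded copy against the pointer (the local path is an assumption):

```python
import hashlib
from pathlib import Path

# Hash a local copy in chunks and compare against the LFS pointer above.
path = Path("model.safetensors")             # assumed local path
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
print("size      :", path.stat().st_size)    # pointer says 3055544304
print("oid sha256:", digest.hexdigest())     # pointer says 89f033c6...
```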
preprocessor_config.json
CHANGED
The diff for this file is too large to render. See raw diff.
requirements.txt
CHANGED
@@ -1,9 +1,10 @@
 torch>=1.7
 torchaudio
-transformers
-datasets
+git+https://github.com/huggingface/transformers
+git+https://github.com/huggingface/datasets
 librosa
 jiwer
 evaluate>=0.3.0
 more-itertools
 tensorboard
+accelerate>=0.26.0
run.sh
CHANGED
@@ -9,16 +9,16 @@ WANDB_PROJECT=whisper-medium-eu \
 --model_index_name="Whisper Medium Basque" \
 --max_steps="8000" \
 --output_dir="./" \
---per_device_train_batch_size="
+--per_device_train_batch_size="16" \
 --per_device_eval_batch_size="8" \
 --gradient_accumulation_steps="1" \
 --logging_steps="25" \
---learning_rate="
+--learning_rate="6.25e-6" \
 --warmup_steps="500" \
 --evaluation_strategy="steps" \
 --eval_steps="500" \
 --save_strategy="steps" \
---save_steps="
+--save_steps="1000" \
 --generation_max_length="225" \
 --length_column_name="input_length" \
 --max_duration_in_seconds="30" \
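For reference, the updated flags correspond roughly to the `Seq2SeqTrainingArguments` below; only the values visible in the diff are filled in, and this is a sketch of the mapping, not how the training script necessarily constructs its arguments:

```python
from transformers import Seq2SeqTrainingArguments

# Sketch: the run.sh flags from the diff expressed as Seq2SeqTrainingArguments.
training_args = Seq2SeqTrainingArguments(
    output_dir="./",
    max_steps=8000,
    per_device_train_batch_size=16,   # value set in this commit
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=1,
    learning_rate=6.25e-6,            # value set in this commit
    warmup_steps=500,
    logging_steps=25,
    eval_strategy="steps",            # the script flag is --evaluation_strategy (older alias)
    eval_steps=500,
    save_strategy="steps",
    save_steps=1000,                  # matches the "step 1000" checkpoint cadence
    generation_max_length=225,
    length_column_name="input_length",
)
```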
run_speech_recognition_seq2seq_streaming.py
CHANGED
@@ -274,7 +274,7 @@ def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train
     if "+" in split:
         # load multiple splits separated by the `+` symbol with streaming mode
         dataset_splits = [
-            load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs)
+            load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, trust_remote_code=True, **kwargs)
             for split_name in split.split("+")
         ]
         # interleave multiple splits to form one dataset
@@ -282,7 +282,7 @@ def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train
         return interleaved_dataset
     else:
         # load a single split *with* streaming mode
-        dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs)
+        dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, trust_remote_code=True, **kwargs)
         return dataset


@@ -357,7 +357,8 @@ def main():
             data_args.dataset_name,
             data_args.dataset_config_name,
             split=data_args.train_split_name,
-            use_auth_token=True if model_args.use_auth_token else None,
+            # xezpeleta
+            #use_auth_token=True if model_args.use_auth_token else None,
             streaming=data_args.streaming,
         )

@@ -366,7 +367,7 @@ def main():
             data_args.dataset_name,
             data_args.dataset_config_name,
             split=data_args.eval_split_name,
-            use_auth_token=True if model_args.use_auth_token else None,
+            #use_auth_token=True if model_args.use_auth_token else None,
            streaming=data_args.streaming,
         )

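The `trust_remote_code=True` arguments added above opt in to running a dataset's Hub-hosted loading script, which recent `datasets` releases refuse to do without explicit consent. A hedged usage sketch of what the patched loader ends up doing for a "+"-joined split; the dataset name and config below are stand-ins, the real ones come from the script's CLI arguments:

```python
from datasets import interleave_datasets, load_dataset

# Stand-in dataset/config; the training script takes these from its arguments.
splits = [
    load_dataset(
        "mozilla-foundation/common_voice_17_0", "eu",
        split=name, streaming=True, trust_remote_code=True,
    )
    for name in "train+validation".split("+")
]
dataset = interleave_datasets(splits)  # one streaming dataset over both splits
sample = next(iter(dataset))           # streaming: examples are fetched lazily
```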
tokenizer.json
CHANGED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
CHANGED
@@ -12976,44 +12976,14 @@
     "<|nocaptions|>",
     "<|notimestamps|>"
   ],
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "eos_token": "<|endoftext|>",
   "errors": "replace",
   "model_max_length": 1024,
-  "
-  "pad_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "<|endoftext|>",
   "processor_class": "WhisperProcessor",
   "return_attention_mask": false,
-  "special_tokens_map_file": "/home/xezpeleta/.cache/huggingface/hub/models--openai--whisper-medium/snapshots/abdf7c39ab9d0397620ccaea8974cc764cd0953e/special_tokens_map.json",
   "tokenizer_class": "WhisperTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<|endoftext|>"
 }
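The special-token entries collapse from `AddedToken` dictionaries to plain strings, which is how newer `transformers` versions serialize them; both forms load to the same tokens. A quick check, using the upstream base checkpoint as a stand-in for this repository:

```python
from transformers import WhisperTokenizer

# Stand-in checkpoint; the fine-tuned repo is expected to behave the same.
tok = WhisperTokenizer.from_pretrained("openai/whisper-medium")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# All four resolve to "<|endoftext|>", matching the new string values above.
```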
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b300aa3f6d707e204c955909f5ec2cd2d146917c1914bbddf3758ed2ed2af738
+size 5368
vocab.json
CHANGED
The diff for this file is too large to render. See raw diff.