marinone94 committed
Commit • a90a966 • 1 Parent(s): 2a4e5fa
Training in progress, step 20
Browse files
- checkpoint-20/config.json +115 -0
- checkpoint-20/optimizer.pt +3 -0
- checkpoint-20/preprocessor_config.json +9 -0
- checkpoint-20/pytorch_model.bin +3 -0
- checkpoint-20/rng_state.pth +3 -0
- checkpoint-20/scaler.pt +3 -0
- checkpoint-20/scheduler.pt +3 -0
- checkpoint-20/trainer_state.json +37 -0
- checkpoint-20/training_args.bin +3 -0
- old_run.sh +4 -2
- old_run_asr_ctc.py +1 -1
- pytorch_model.bin +1 -1
- run.sh +8 -6
- run_speech_recognition_ctc.py +15 -6
- special_tokens_map.json +1 -1
- training_args.bin +1 -1
checkpoint-20/config.json
ADDED
@@ -0,0 +1,115 @@
+{
+  "_name_or_path": "KBLab/wav2vec2-large-voxrex",
+  "activation_dropout": 0.1,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForCTC"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": true,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "mean",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": true,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "layer",
+  "feat_proj_dropout": 0.0,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.0,
+  "mask_channel_length": 10,
+  "mask_channel_min_space": 1,
+  "mask_channel_other": 0.0,
+  "mask_channel_prob": 0.0,
+  "mask_channel_selection": "static",
+  "mask_feature_length": 64,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.25,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_min_space": 1,
+  "mask_time_other": 0.0,
+  "mask_time_prob": 0.75,
+  "mask_time_selection": "static",
+  "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 31,
+  "proj_codevector_dim": 768,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "torch_dtype": "float32",
+  "transformers_version": "4.17.0.dev0",
+  "use_weighted_layer_sum": false,
+  "vocab_size": 34,
+  "xvector_output_dim": 512
+}
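For reference, a minimal sketch (not part of this commit) of loading this checkpoint for a quick CTC decode; it assumes `transformers` and `torch` are installed and that the repository root holds the tokenizer and preprocessor files (vocab.json, special_tokens_map.json, preprocessor_config.json).

# Hedged sketch, not part of the commit: load checkpoint-20 and decode dummy audio.
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model = Wav2Vec2ForCTC.from_pretrained("checkpoint-20")  # reads config.json + pytorch_model.bin
processor = Wav2Vec2Processor.from_pretrained(".")       # tokenizer + feature extractor from the repo root (assumed present)

# One second of silence at the 16 kHz rate declared in preprocessor_config.json.
dummy_audio = torch.zeros(16000).numpy()
inputs = processor(dummy_audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))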
checkpoint-20/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:075df4bf25f6d34069d73115a8c7331da68aa4a7dd3f23353e295886bc03506a
+size 2490337361
checkpoint-20/preprocessor_config.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0,
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
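The same preprocessor settings can be loaded directly; a sketch only, assuming `transformers` is installed.

# Sketch: the feature extractor defined by the preprocessor_config.json above.
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("checkpoint-20")
print(feature_extractor.sampling_rate)          # 16000
print(feature_extractor.return_attention_mask)  # True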
checkpoint-20/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9b6100773ca3a4687e69ef744a414602f964603f13c71a09f8727f73dd051f1
+size 1262063089
checkpoint-20/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73e968b5fb75f3d028c5b87fc29d361b46586d839eb1fc406be3e77c69542778
+size 14567
checkpoint-20/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8d1040cd70f91f849688103143e8c9d631ff10c4acee0b2f4d6ff9e27727776
+size 559
checkpoint-20/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bee97b3839fd1b431d21567a2339671d799c163d5402ee322afd861513750b5
+size 623
checkpoint-20/trainer_state.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.26490066225165565,
+  "global_step": 20,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.13,
+      "learning_rate": 4.9999999999999996e-05,
+      "loss": 13.2048,
+      "step": 10
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 7.46938775510204e-05,
+      "loss": 26.5657,
+      "step": 20
+    },
+    {
+      "epoch": 0.26,
+      "eval_loss": 17.199691772460938,
+      "eval_runtime": 5.248,
+      "eval_samples_per_second": 18.674,
+      "eval_steps_per_second": 0.762,
+      "eval_wer": 1.0,
+      "step": 20
+    }
+  ],
+  "max_steps": 750,
+  "num_train_epochs": 10,
+  "total_flos": 4.155165485184e+17,
+  "trial_name": null,
+  "trial_params": null
+}
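A small sketch (not in the commit) for inspecting the metrics logged in this trainer state:

# Sketch: read the checkpoint's trainer state and print the eval entries.
import json

with open("checkpoint-20/trainer_state.json") as f:
    state = json.load(f)

print(f"step {state['global_step']}/{state['max_steps']}, epoch {state['epoch']:.2f}")
for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f"step {entry['step']}: eval_loss={entry['eval_loss']:.2f}, eval_wer={entry['eval_wer']}")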
checkpoint-20/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:470abf25a211ef5ec3a9821c884f2749839bee5e95f633b168d26f90c1809409
+size 3055
old_run.sh
CHANGED
@@ -6,11 +6,13 @@ python old_run_asr_ctc.py \
 --eval_split_name="test,None" \
 --output_dir="./" \
 --overwrite_output_dir \
---num_train_epochs="
+--num_train_epochs="10" \
 --per_device_train_batch_size="32" \
 --per_device_eval_batch_size="32" \
 --gradient_accumulation_steps="4" \
---learning_rate="7.5e-
+--learning_rate="7.5e-5" \
+--max_train_samples="10000" \
+--max_eval_samples="100" \
 --warmup_ratio="0.0" \
 --length_column_name="input_length" \
 --evaluation_strategy="steps" \
old_run_asr_ctc.py
CHANGED
@@ -525,7 +525,7 @@ def main():
     )
     new_dataset_sampling_rate = new_dataset["eval"].features[data_args.audio_column_name].sampling_rate
     if new_dataset_sampling_rate != dataset_sampling_rate:
-        print(f"New dataset sampling rate casted from {
+        print(f"New dataset sampling rate casted from {new_dataset_sampling_rate} to {dataset_sampling_rate}")
         new_dataset["eval"] = new_dataset["eval"].cast_column(
             data_args.audio_column_name, datasets.features.Audio(sampling_rate=dataset_sampling_rate)
         )
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e9b6100773ca3a4687e69ef744a414602f964603f13c71a09f8727f73dd051f1
 size 1262063089
run.sh
CHANGED
@@ -6,20 +6,22 @@ python run_speech_recognition_ctc.py \
 --eval_split_name="test" \
 --output_dir="./" \
 --overwrite_output_dir \
---num_train_epochs="
+--num_train_epochs="10" \
 --per_device_train_batch_size="32" \
 --per_device_eval_batch_size="32" \
 --gradient_accumulation_steps="4" \
---learning_rate="7.5e-" \
+--learning_rate="7.5e-5" \
+--max_train_samples="10000" \
+--max_eval_samples="100" \
 --warmup_ratio="0.02" \
 --length_column_name="input_length" \
 --evaluation_strategy="steps" \
 --save_strategy="steps" \
---eval_steps="
---save_steps="
+--eval_steps="20" \
+--save_steps="20" \
 --text_column_name="sentence" \
 --chars_to_ignore , ? . ! \- \; \: \" " % " " � — ' … – \
---logging_steps="
+--logging_steps="10" \
 --layerdrop="0.0" \
 --activation_dropout="0.1" \
 --save_total_limit="2" \
@@ -39,7 +41,7 @@ python run_speech_recognition_ctc.py \
 python run_speech_recognition_ctc.py \
 --dataset_name="mozilla-foundation/common_voice_8_0" \
 --model_name_or_path="KBLab/wav2vec2-large-voxrex" \
---dataset_config_name="
+--dataset_config_name="sv-SE" \
 --train_split_name="train+validation" \
 --eval_split_name="test" \
 --output_dir="./" \
run_speech_recognition_ctc.py
CHANGED
@@ -476,6 +476,9 @@ def main():
         f"{', '.join(raw_datasets['train'].column_names)}."
     )
 
+    dataset_frequency = raw_datasets["train"].features[data_args.audio_column_name].sampling_rate
+    print(f"Dataset sampling rate: {dataset_frequency}")
+
     if data_args.text_column_name not in raw_datasets["train"].column_names:
         raise ValueError(
             f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
@@ -528,12 +531,18 @@ def main():
     # else:
     #     logging.warning(f"{dataset_name} {dataset_config_name} eval not loaded as split is {eval_split_name}")
 
-
-
-
-
-
-
+    try:
+        raw_datasets["eval"] = load_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            split=data_args.eval_split_name,
+            use_auth_token=data_args.use_auth_token,
+        )
+    except ValueError:
+        split_dataset = raw_datasets["train"].train_test_split(test_size=0.1, seed=42)
+        raw_datasets["eval"] = split_dataset["test"]
+        print(raw_datasets["eval"])
+        print("Sampled from training set")
 
     if data_args.max_eval_samples is not None:
         raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
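The try/except block added above falls back to a 90/10 split of the training data when the requested eval split does not exist. A standalone sketch of the same pattern follows; the dataset and split names are assumptions for illustration, mirrored from run.sh.

# Standalone sketch of the eval-split fallback above; values mirror run.sh.
from datasets import load_dataset

dataset_name = "mozilla-foundation/common_voice_8_0"  # assumed, as in run.sh
config_name = "sv-SE"                                 # assumed, as in run.sh

raw_datasets = {}
raw_datasets["train"] = load_dataset(dataset_name, config_name, split="train+validation", use_auth_token=True)

try:
    # Prefer the dataset's own test split when it exists.
    raw_datasets["eval"] = load_dataset(dataset_name, config_name, split="test", use_auth_token=True)
except ValueError:
    # Otherwise hold out 10% of the training data as the eval set.
    split_dataset = raw_datasets["train"].train_test_split(test_size=0.1, seed=42)
    raw_datasets["eval"] = split_dataset["test"]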
special_tokens_map.json
CHANGED
@@ -1 +1 @@
-
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:470abf25a211ef5ec3a9821c884f2749839bee5e95f633b168d26f90c1809409
 size 3055