Commit 1b22ac1
Parent(s): 6dc252b

Saving train state of step 80000
Files changed:
- checkpoint-80000-epoch-5/optimizer.bin +3 -0
- checkpoint-80000-epoch-5/pytorch_model.bin +3 -0
- checkpoint-80000-epoch-5/random_states_0.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_1.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_2.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_3.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_4.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_5.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_6.pkl +3 -0
- checkpoint-80000-epoch-5/random_states_7.pkl +3 -0
- checkpoint-80000-epoch-5/scheduler.bin +3 -0
- starting_point_0.01_rope.json +1 -0
- training/__pycache__/arguments.cpython-311.pyc +0 -0
- training/__pycache__/data.cpython-311.pyc +0 -0
- training/__pycache__/eval.cpython-311.pyc +0 -0
- training/__pycache__/utils.cpython-311.pyc +0 -0
- training/arguments.py +7 -1
- training/data.py +6 -1
- training/eval.py +1 -2
- training/run_parler_tts_training.py +29 -9
- training/utils.py +4 -2
checkpoint-80000-epoch-5/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d85a6ebcfaea697deb81a9f43cf52b763722cc63c52559be99475194f3b1740
+size 3652769047
checkpoint-80000-epoch-5/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64ed77445bae74c0a44de014644335672082db652c167e03307f5aa69f497029
+size 2588465818
checkpoint-80000-epoch-5/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bee040cadf66c07cc28951d8b63f5309317c467c935239899967dedde236356
+size 16100
checkpoint-80000-epoch-5/random_states_1.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac2984f6c11e0d6baba6e153c36e02cf7d8849f435a8e1d10db3db9422a5e14f
+size 16100
checkpoint-80000-epoch-5/random_states_2.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0328ba0db514b5427f0cf5728abf82349345bec8a02097c30900ec57f433da11
+size 16100
checkpoint-80000-epoch-5/random_states_3.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:668cc4578435da562a838c3860a81ef62a34fdfed1920ccebb7657eb586c14cb
+size 16100
checkpoint-80000-epoch-5/random_states_4.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c856097a2b0832cc85eba37f9cd1be76e0d395fe4b2c99ef3153199386dae5e5
+size 16100
checkpoint-80000-epoch-5/random_states_5.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01d11b3bb1f3959f58be8c638fb21de1e89e54c13aa716a178687cb1fc51ac99
+size 16100
checkpoint-80000-epoch-5/random_states_6.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18447190b31cbfd6ac8565a46dfbdc82123f510bef0eb0a72313d1150001b8f4
+size 16100
checkpoint-80000-epoch-5/random_states_7.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:025b2bf49a9a6fe2a6fd8cb7a98ba98f4eb439e04b5aa68fd3285c8c109bdc4f
+size 16100
checkpoint-80000-epoch-5/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd55300ad4b9da07287969193b88215d1af9b5d03f7bf3a833f1b260aa6434c4
+size 1000
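The eleven checkpoint files above are committed as Git LFS pointers, so the diff only shows the three-line pointer (spec version, content hash, size) rather than the binaries themselves. As a rough sketch of where these files come from, assuming the saving path used further down in this commit's run_parler_tts_training.py, Accelerate's save_state writes model, optimizer and scheduler state plus one RNG-state pickle per process, which matches the eight random_states_*.pkl files of an 8-process run:

from accelerate import Accelerator

# Minimal sketch, not the training script itself: save_state produces
# pytorch_model.bin, optimizer.bin, scheduler.bin and random_states_{rank}.pkl
# in the given directory; safe_serialization=False mirrors the call in the script.
accelerator = Accelerator()
accelerator.save_state(output_dir="checkpoint-80000-epoch-5", safe_serialization=False)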
starting_point_0.01_rope.json
CHANGED
@@ -10,6 +10,7 @@
 "prompt_tokenizer_name":"google/flan-t5-base",

 "report_to": ["wandb"],
+"wandb_run_name": "parler-tts-600M-cross-attention-rope",
 "overwrite_output_dir": false,
 "output_dir": "./",

training/__pycache__/arguments.cpython-311.pyc
CHANGED
Binary files a/training/__pycache__/arguments.cpython-311.pyc and b/training/__pycache__/arguments.cpython-311.pyc differ

training/__pycache__/data.cpython-311.pyc
CHANGED
Binary files a/training/__pycache__/data.cpython-311.pyc and b/training/__pycache__/data.cpython-311.pyc differ

training/__pycache__/eval.cpython-311.pyc
CHANGED
Binary files a/training/__pycache__/eval.cpython-311.pyc and b/training/__pycache__/eval.cpython-311.pyc differ

training/__pycache__/utils.cpython-311.pyc
CHANGED
Binary files a/training/__pycache__/utils.cpython-311.pyc and b/training/__pycache__/utils.cpython-311.pyc differ
training/arguments.py
CHANGED
@@ -218,7 +218,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "If set, filter samples with descriptions that are longer than `max_description_token_length` tokens."
-                "Also, used to set maximum
+                "Also, used to set maximum description token length if `pad_to_max_length=True`."
             )
         },
     )
@@ -277,6 +277,12 @@ class DataTrainingArguments:
         default="parler-speech",
         metadata={"help": "The name of the wandb project."},
     )
+    wandb_run_name: str = field(
+        default=None,
+        metadata={
+            "help": "If specified, the name of the run. If not specified, wandb will give a random name to this run."
+        },
+    )
     save_to_disk: str = field(
         default=None,
         metadata={
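The new wandb_run_name field pairs with the key added to starting_point_0.01_rope.json above and with the init_kwargs line added to run_parler_tts_training.py below. A minimal sketch of that wiring, assuming the truncated call in the later hunk is accelerator.init_trackers, which accepts per-tracker init_kwargs (values here are illustrative):

from accelerate import Accelerator

accelerator = Accelerator(log_with="wandb")
tracker_config = {"temperature": 1.0}  # stand-in for the full hyperparameter dict built in the script
wandb_run_name = "parler-tts-600M-cross-attention-rope"  # as set in starting_point_0.01_rope.json

accelerator.init_trackers(
    "parler-speech",  # default of the wandb_project argument
    config=tracker_config,
    init_kwargs={"wandb": {"name": wandb_run_name}} if wandb_run_name else {},
)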
training/data.py
CHANGED
@@ -31,7 +31,12 @@ class DataCollatorEncodecWithPadding:
         audios = [feature[self.audio_column_name]["array"] for feature in features]
         len_audio = [len(audio) for audio in audios]

-
+        # since resampling has already been performed in the 'load_multiple_datasets' function,
+        # a fixed sampling_rate(44100hz) is passed to the feature_extractor.
+        sampling_rate = self.feature_extractor.sampling_rate
+        batch = self.feature_extractor(
+            audios, sampling_rate=sampling_rate, return_tensors="pt", padding=self.padding, max_length=self.max_length
+        )
         batch["len_audio"] = torch.tensor(len_audio).unsqueeze(1)
         return batch
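The collator change makes the feature-extractor call explicit about the sampling rate: audio has already been resampled in load_multiple_datasets, so the extractor's own configured rate (44.1 kHz here) is passed back in rather than left implicit. A hedged sketch of that call pattern, assuming an EncodecFeatureExtractor-style extractor (the exact extractor checkpoint is not shown in this diff):

import numpy as np
from transformers import EncodecFeatureExtractor

# Hedged sketch: build the extractor with the rate the audio was resampled to,
# then pass the same rate explicitly at call time, as the new collator code does.
feature_extractor = EncodecFeatureExtractor(sampling_rate=44100)
audios = [np.zeros(44100, dtype=np.float32), np.zeros(22050, dtype=np.float32)]  # two dummy mono clips
batch = feature_extractor(
    audios,
    sampling_rate=feature_extractor.sampling_rate,
    return_tensors="pt",
    padding="longest",
)
print(batch["input_values"].shape)  # padded to the longest clip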
training/eval.py
CHANGED
@@ -47,8 +47,7 @@ def wer(asr_model_name_or_path, prompts, audios, device, per_device_eval_batch_s
     normalized_references = []

     for pred, ref in zip(transcriptions, prompts):
-        normalizer = english_normalizer
-
+        normalizer = english_normalizer if return_language and pred["chunks"][0]["language"] == "english" else basic_normalizer
         norm_ref = normalizer(ref)
         if len(norm_ref) > 0:
             norm_pred = normalizer(pred["text"])
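The eval change picks a normalizer per sample: English transcriptions keep the English text normalizer, anything else falls back to a basic one, based on the language the ASR pipeline reports in its chunks. A small sketch of that selection, assuming the two normalizers are the Whisper text normalizers from transformers (the prediction dict is illustrative):

from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer

english_normalizer = EnglishTextNormalizer(english_spelling_mapping={})  # empty mapping for illustration
basic_normalizer = BasicTextNormalizer()

return_language = True
pred = {"text": "Hello, world!", "chunks": [{"language": "english"}]}  # illustrative ASR output
normalizer = english_normalizer if return_language and pred["chunks"][0]["language"] == "english" else basic_normalizer
print(normalizer(pred["text"]))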
training/run_parler_tts_training.py
CHANGED
@@ -98,9 +98,6 @@ def main():

     ####### A. Preparation
     kwargs_handlers = [InitProcessGroupKwargs(timeout=timedelta(minutes=60))]
-    if training_args.torch_compile:
-        # TODO(YL): add more compile modes?
-        kwargs_handlers.append(TorchDynamoPlugin(backend="inductor", mode="default"))  # reduce-overhead

     accelerator = Accelerator(
         gradient_accumulation_steps=training_args.gradient_accumulation_steps,
@@ -129,6 +126,7 @@ def main():
             "adam_beta2": training_args.adam_beta2,
             "temperature": model_args.temperature,
         },
+        init_kwargs={"wandb": {"name": data_args.wandb_run_name}} if data_args.wandb_run_name else {},
     )

     # Detecting last checkpoint and eventually continue from last checkpoint
@@ -314,6 +312,7 @@ def main():
         token=data_args.token,
         trust_remote_code=data_args.trust_remote_code,
     )
+    generation_config = model.generation_config

     # enable gradient checkpointing if necessary
     if training_args.gradient_checkpointing:
@@ -334,8 +333,8 @@ def main():
     feature_extractor_input_name = feature_extractor.model_input_names[0]
     audio_encoder_pad_token_id = config.decoder.pad_token_id
     audio_encoder_eos_token_id = config.decoder.eos_token_id
-    audio_encoder_bos_token_id =
-    max_length =
+    audio_encoder_bos_token_id = generation_config.decoder_start_token_id
+    max_length = generation_config.max_length
     num_codebooks = model.decoder.config.num_codebooks
     bandwidth = model_args.bandwidth

@@ -538,7 +537,7 @@ def main():
         logger.info(f"Dataset saved at {data_args.save_to_disk}")

     audio_max_length = None
-    if
+    if padding == "max_length":
         audio_max_length = max(vectorized_datasets["train"]["target_length"])
         with accelerator.main_process_first():
             max_sample = vectorized_datasets["train"].filter(
@@ -548,6 +547,18 @@ def main():
             )
             audio_max_length = torch.tensor(max_sample[0]["labels"]).shape[1]

+    if training_args.group_by_length:
+        # apply a simple heuristic to take into account audio and text lengths
+        def add_target_lengths(target_length, prompt, description):
+            return {"target_length": target_length + len(prompt) + len(description)}
+
+        with accelerator.main_process_first():
+            vectorized_datasets = vectorized_datasets.map(
+                add_target_lengths,
+                num_proc=num_workers,
+                input_columns=["target_length", "prompt_input_ids", "input_ids"],
+            )
+
     # for large datasets it is advised to run the preprocessing on a
     # single machine first with ``args.preprocessing_only`` since there will mostly likely
     # be a timeout when running the script in distributed mode.
@@ -670,6 +681,8 @@ def main():
         checkpoint = last_checkpoint

     if accelerator.is_main_process:
+        if training_args.output_dir is not None:
+            os.makedirs(training_args.output_dir, exist_ok=True)
         if training_args.push_to_hub:
             api = HfApi(token=training_args.hub_token)

@@ -682,8 +695,6 @@ def main():
             with open(os.path.join(training_args.output_dir, ".gitignore"), "w+") as gitignore:
                 if "wandb" not in gitignore:
                     gitignore.write("wandb\n")
-        elif training_args.output_dir is not None:
-            os.makedirs(training_args.output_dir, exist_ok=True)
     accelerator.wait_for_everyone()

     # Now save everything to be able to create a single processor later
@@ -740,7 +751,13 @@ def main():
         "do_sample": model_args.do_sample,
         "temperature": model_args.temperature,
         "max_length": model_args.max_length,
+        # Because of the delayed pattern mask, generation might stop earlier because of unexpected behaviour
+        # on the first tokens of the codebooks that are delayed.
+        # This fix the issue.
+        "min_new_tokens": num_codebooks + 1,
     }
+    for key in gen_kwargs:
+        generation_config.key = gen_kwargs[key]

     # Define gradient update step fn
     def train_step(
@@ -869,9 +886,11 @@ def main():
                 # safe_serialization=False to avoid shared tensors saving issue (TODO(YL): it's a temporary fix)
                 # https://github.com/huggingface/transformers/issues/27293#issuecomment-1872560074
                 accelerator.save_state(output_dir=intermediate_dir, safe_serialization=False)
+                config.save_pretrained(intermediate_dir)
+                generation_config.save_pretrained(intermediate_dir)
                 accelerator.wait_for_everyone()
                 if accelerator.is_main_process:
-                    rotate_checkpoints(
+                    checkpoints_to_be_deleted = rotate_checkpoints(
                         training_args.save_total_limit, output_dir=training_args.output_dir, logger=logger
                     )

@@ -886,6 +905,7 @@ def main():
                         folder_path=training_args.output_dir,
                         commit_message=f"Saving train state of step {cur_step}",
                         run_as_future=True,
+                        delete_patterns=checkpoints_to_be_deleted,
                     )

         if training_args.do_eval and (cur_step % eval_steps == 0 or cur_step == total_train_steps):
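One detail worth noting in the gen_kwargs hunk above: the committed loop `generation_config.key = gen_kwargs[key]` writes every value to a single attribute literally named "key" on the generation config, not to the attribute named by the loop variable. A hedged sketch of copying each entry under its own name, which is presumably the intent (illustrative values, not the committed code):

from transformers import GenerationConfig

generation_config = GenerationConfig()
gen_kwargs = {"do_sample": True, "temperature": 1.0, "min_new_tokens": 10}  # illustrative values
for key, value in gen_kwargs.items():
    setattr(generation_config, key, value)  # sets do_sample, temperature, min_new_tokens as real attributes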
training/utils.py
CHANGED
@@ -3,7 +3,7 @@ import re
 import shutil
 from pathlib import Path
 from dataclasses import field
-from typing import Dict, List
+from typing import Dict, List, Union

 import torch
 from wandb import Audio
@@ -44,7 +44,7 @@ def sorted_checkpoints(output_dir=None, checkpoint_prefix="checkpoint") -> List[
     return checkpoints_sorted


-def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint", logger=None) -> None:
+def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint", logger=None) -> Union[List, None]:
     """Helper function to delete old checkpoints."""
     if save_total_limit is None or save_total_limit <= 0:
         return
@@ -58,6 +58,8 @@ def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix
     for checkpoint in checkpoints_to_be_deleted:
         logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
         shutil.rmtree(checkpoint, ignore_errors=True)
+    checkpoints_to_be_deleted = [f"*{Path(checkpoint).absolute().name}*" for checkpoint in checkpoints_to_be_deleted]
+    return checkpoints_to_be_deleted


 def log_metric(
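With this change, rotate_checkpoints returns glob patterns for the checkpoints it just deleted locally, and run_parler_tts_training.py forwards them as delete_patterns so the next upload_folder call removes the same checkpoint directories from the Hub repository. A minimal usage sketch (repo id, step and pattern are placeholders):

from huggingface_hub import HfApi

api = HfApi()
cur_step = 80000  # placeholder
checkpoints_to_be_deleted = ["*checkpoint-70000-epoch-4*"]  # e.g. what rotate_checkpoints returns
api.upload_folder(
    repo_id="username/parler-tts-finetune",  # placeholder repo id
    folder_path="./",
    commit_message=f"Saving train state of step {cur_step}",
    run_as_future=True,
    delete_patterns=checkpoints_to_be_deleted,
)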