# Provenance: uploaded by binhnx8 via huggingface_hub
# ("Upload folder using huggingface_hub", commit 32318f6 verified, 3.04 kB).
# NOTE(review): the raw page scrape stripped all YAML indentation; the
# stanza nesting below has been reconstructed from the Hydra/Lightning schema.
# Lightning module: text-to-semantic transformer fine-tuned with LoRA adapters.
model:
  _target_: fish_speech.models.text2semantic.lit_module.TextToSemantic
  # Base transformer loaded from a pretrained checkpoint; weights are reused.
  model:
    _target_: fish_speech.models.text2semantic.llama.BaseTransformer.from_pretrained
    path: checkpoints/fish-speech-1.2
    load_weights: true
    max_length: 1024
    # LoRA low-rank adapter config: rank 8, alpha 16 (scaling = alpha / r = 2).
    lora_config:
      _target_: fish_speech.models.text2semantic.lora.LoraConfig
      r: 8
      lora_alpha: 16
      lora_dropout: 0.01
  # Optimizer factory (_partial_: instantiated later with the model parameters).
  optimizer:
    _target_: torch.optim.AdamW
    _partial_: true
    lr: 0.0001
    weight_decay: 0.01
    betas:
      - 0.9
      - 0.95
    eps: 1.0e-05
  # Constant LR after a short warmup, wired through LambdaLR.
  lr_scheduler:
    _target_: torch.optim.lr_scheduler.LambdaLR
    _partial_: true
    lr_lambda:
      _target_: fish_speech.scheduler.get_constant_schedule_with_warmup_lr_lambda
      _partial_: true
      num_warmup_steps: 50
# Parameter counts recorded for this run: with LoRA, only ~5M of ~495M
# weights are trainable (trainable + non_trainable = total).
model/params/total: 495286272
model/params/trainable: 5017600
model/params/non_trainable: 490268672
# Data module: identical train/val dataset configs over protobuf shards.
data:
  _target_: fish_speech.datasets.semantic.SemanticDataModule
  train_dataset:
    _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
    proto_files:
      - data/protos
    # Tokenizer must match the checkpoint the base transformer was loaded from.
    tokenizer:
      _target_: transformers.AutoTokenizer.from_pretrained
      pretrained_model_name_or_path: checkpoints/fish-speech-1.2
    causal: true
    max_length: 1024
    use_speaker: false
    # Probability of formatting a sample as an interactive/instruction turn.
    interactive_prob: 0.7
  # Validation uses the same dataset definition (same protos — TODO confirm
  # a held-out split is intended rather than validating on training data).
  val_dataset:
    _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
    proto_files:
      - data/protos
    tokenizer:
      _target_: transformers.AutoTokenizer.from_pretrained
      pretrained_model_name_or_path: checkpoints/fish-speech-1.2
    causal: true
    max_length: 1024
    use_speaker: false
    interactive_prob: 0.7
  num_workers: 4
  batch_size: 4
# Top-level tokenizer / max_length (shared defaults; same checkpoint as above).
tokenizer:
  _target_: transformers.AutoTokenizer.from_pretrained
  pretrained_model_name_or_path: checkpoints/fish-speech-1.2
max_length: 1024
# Lightning Trainer: multi-GPU DDP over NCCL, bf16, step-based validation.
trainer:
  _target_: lightning.pytorch.trainer.Trainer
  default_root_dir: results/mix_v2
  accelerator: gpu
  num_nodes: 1
  devices: auto
  strategy:
    _target_: lightning.pytorch.strategies.DDPStrategy
    process_group_backend: nccl
  # NOTE(review): precision is a Trainer argument (DDPStrategy takes none) —
  # placed here accordingly; "bf16-true" runs the model natively in bfloat16.
  precision: bf16-true
  # Validate every 1000 steps instead of per-epoch (epoch check disabled).
  check_val_every_n_epoch: null
  val_check_interval: 1000
  max_steps: 100000
  benchmark: true
  accumulate_grad_batches: 1
  gradient_clip_val: 1.0
  gradient_clip_algorithm: norm
  limit_val_batches: 10
callbacks:
  # Checkpoint every 1000 training steps; "monitor: step" + "mode: max" with
  # save_top_k: 5 keeps the 5 most recent step checkpoints.
  model_checkpoint:
    _target_: lightning.pytorch.callbacks.ModelCheckpoint
    dirpath: results/mix_v2/checkpoints
    filename: step_{step:09d}
    save_last: false
    save_top_k: 5
    monitor: step
    mode: max
    every_n_epochs: null
    every_n_train_steps: 1000
    auto_insert_metric_name: false
  model_summary:
    _target_: lightning.pytorch.callbacks.ModelSummary
    max_depth: 2
  learning_rate_monitor:
    _target_: lightning.pytorch.callbacks.LearningRateMonitor
    logging_interval: step
    log_momentum: false
  # Project-local callback logging the L2 gradient norm each step.
  grad_norm_monitor:
    _target_: fish_speech.callbacks.GradNormMonitor
    norm_type: 2
    logging_interval: step
# Unused template fields: no extras/tags, fresh run (no resume checkpoint),
# and no fixed random seed.
extras: null
task_name: null
tags: null
ckpt_path: null
seed: null