# fish_speech text2semantic LoRA fine-tuning configuration (snapshot 32318f6)
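# Every block below is instantiated by Hydra from its `_target_`. A minimal
# sketch of the equivalent Python, assuming standard hydra.utils semantics
# (the config filename is hypothetical):
#
#   from hydra.utils import instantiate
#   from omegaconf import OmegaConf
#
#   cfg = OmegaConf.load("text2semantic_finetune.yaml")  # hypothetical path
#   model = instantiate(cfg.model)      # TextToSemantic with LoRA applied
#   data = instantiate(cfg.data)        # SemanticDataModule
#   trainer = instantiate(cfg.trainer)  # lightning.pytorch Trainer
#   trainer.fit(model, data)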
model:
  _target_: fish_speech.models.text2semantic.lit_module.TextToSemantic
  model:
    _target_: fish_speech.models.text2semantic.llama.BaseTransformer.from_pretrained
    path: checkpoints/fish-speech-1.2
    load_weights: true
    max_length: 1024
    lora_config:
      _target_: fish_speech.models.text2semantic.lora.LoraConfig
      r: 8
      lora_alpha: 16
      lora_dropout: 0.01
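      # With r=8 and lora_alpha=16 the effective adapter scaling is alpha/r = 2;
      # only the adapter weights train (see the model/params/* counts below).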
  optimizer:
    _target_: torch.optim.AdamW
    _partial_: true
    lr: 0.0001
    weight_decay: 0.01
    betas:
    - 0.9
    - 0.95
    eps: 1.0e-05
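  # `_partial_: true` makes Hydra return a functools.partial instead of a built
  # optimizer; the LightningModule binds the parameters itself, roughly (a
  # sketch, assuming the usual Lightning configure_optimizers pattern):
  #   optimizer = self.optimizer(self.parameters())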
  lr_scheduler:
    _target_: torch.optim.lr_scheduler.LambdaLR
    _partial_: true
    lr_lambda:
      _target_: fish_speech.scheduler.get_constant_schedule_with_warmup_lr_lambda
      _partial_: true
      num_warmup_steps: 50
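  # Constant schedule with warmup: the learning rate ramps linearly from 0 to
  # 1e-4 over the first 50 steps, then holds. A sketch of the lambda, mirroring
  # the standard definition (the exact fish_speech signature is an assumption):
  #   def lr_lambda(step, num_warmup_steps=50):
  #       return min(1.0, step / max(1, num_warmup_steps))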
model/params/total: 495286272
model/params/trainable: 5017600
model/params/non_trainable: 490268672
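# The three model/params/* entries above are logged summary statistics, not
# config inputs: 5,017,600 trainable (LoRA) parameters out of 495,286,272
# total, i.e. roughly 1% of the model is updated.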
data:
  _target_: fish_speech.datasets.semantic.SemanticDataModule
  train_dataset:
    _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
    proto_files:
    - data/protos
    tokenizer:
      _target_: transformers.AutoTokenizer.from_pretrained
      pretrained_model_name_or_path: checkpoints/fish-speech-1.2
    causal: true
    max_length: 1024
    use_speaker: false
    interactive_prob: 0.7
  val_dataset:
    _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
    proto_files:
    - data/protos
    tokenizer:
      _target_: transformers.AutoTokenizer.from_pretrained
      pretrained_model_name_or_path: checkpoints/fish-speech-1.2
    causal: true
    max_length: 1024
    use_speaker: false
    interactive_prob: 0.7
  num_workers: 4
  batch_size: 4
  tokenizer:
    _target_: transformers.AutoTokenizer.from_pretrained
    pretrained_model_name_or_path: checkpoints/fish-speech-1.2
  max_length: 1024
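# Train and validation both read protobuf shards from data/protos with the same
# tokenizer; sequences are capped at 1024 tokens, and 4 dataloader workers
# assemble per-device batches of 4. interactive_prob: 0.7 is presumably the
# probability a sample is rendered in the interactive/instruction format (an
# assumption based on the field name).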
trainer:
  _target_: lightning.pytorch.trainer.Trainer
  default_root_dir: results/mix_v2
  accelerator: gpu
  num_nodes: 1
  devices: auto
  strategy:
    _target_: lightning.pytorch.strategies.DDPStrategy
    process_group_backend: nccl
  precision: bf16-true
  check_val_every_n_epoch: null
  val_check_interval: 1000
  max_steps: 100000
  benchmark: true
  accumulate_grad_batches: 1
  gradient_clip_val: 1.0
  gradient_clip_algorithm: norm
  limit_val_batches: 10
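# Multi-GPU training via DDP over NCCL in true bf16. With
# accumulate_grad_batches: 1 the effective batch size is 4 × (number of
# devices). Validation runs every 1000 steps (epoch-based validation is
# disabled) on at most 10 batches; gradients are clipped to an L2 norm of 1.0,
# and training stops at 100,000 steps.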
callbacks:
  model_checkpoint:
    _target_: lightning.pytorch.callbacks.ModelCheckpoint
    dirpath: results/mix_v2/checkpoints
    filename: step_{step:09d}
    save_last: false
    save_top_k: 5
    monitor: step
    mode: max
    every_n_epochs: null
    every_n_train_steps: 1000
    auto_insert_metric_name: false
  model_summary:
    _target_: lightning.pytorch.callbacks.ModelSummary
    max_depth: 2
  learning_rate_monitor:
    _target_: lightning.pytorch.callbacks.LearningRateMonitor
    logging_interval: step
    log_momentum: false
  grad_norm_monitor:
    _target_: fish_speech.callbacks.GradNormMonitor
    norm_type: 2
    logging_interval: step
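# Checkpoints are written every 1000 training steps; monitoring `step` with
# mode: max and save_top_k: 5 keeps the 5 most recent checkpoints (named like
# step_000001000.ckpt per the filename template). GradNormMonitor logs the
# global L2 gradient norm every step.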
extras: null
task_name: null
tags: null
ckpt_path: null
seed: null
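# A sketch of launching a run that yields this resolved config (the entrypoint
# and override syntax are assumptions based on the usual fish-speech layout):
#   python fish_speech/train.py --config-name text2semantic_finetune \
#       +lora@model.model.lora_config=r_8_alpha_16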