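# Hydra-style training config (objects are instantiated from their _target_
# entries) for LoRA fine-tuning of the fish_speech text2semantic model with
# Lightning.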
paths:
  run_dir: results/${project}
  ckpt_dir: ${paths.run_dir}/checkpoints
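# Lightning Trainer: DDP over NCCL on all visible GPUs, bf16 precision,
# step-based validation every 1000 steps, 100k training steps in total.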
trainer:
  _target_: lightning.pytorch.trainer.Trainer
  default_root_dir: ${paths.run_dir}
  accelerator: gpu
  num_nodes: 1
  devices: auto
  strategy:
    _target_: lightning.pytorch.strategies.DDPStrategy
    process_group_backend: nccl
  precision: bf16-true
  check_val_every_n_epoch: null
  val_check_interval: 1000
  max_steps: 100000
  benchmark: true
  accumulate_grad_batches: 1
  gradient_clip_val: 1.0
  gradient_clip_algorithm: norm
  limit_val_batches: 10
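# Callbacks: checkpoint every ${trainer.val_check_interval} training steps
# (keeping the 5 most recent, since `step` is monitored in max mode), plus
# model summary, learning-rate, and gradient-norm logging.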
callbacks:
  model_checkpoint:
    _target_: lightning.pytorch.callbacks.ModelCheckpoint
    dirpath: ${paths.ckpt_dir}
    filename: step_{step:09d}
    save_last: false
    save_top_k: 5
    monitor: step
    mode: max
    every_n_epochs: null
    every_n_train_steps: ${trainer.val_check_interval}
    auto_insert_metric_name: false
  model_summary:
    _target_: lightning.pytorch.callbacks.ModelSummary
    max_depth: 2
  learning_rate_monitor:
    _target_: lightning.pytorch.callbacks.LearningRateMonitor
    logging_interval: step
    log_momentum: false
  grad_norm_monitor:
    _target_: fish_speech.callbacks.GradNormMonitor
    norm_type: 2
    logging_interval: step
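# TensorBoard logging under the run directory.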
logger:
  tensorboard:
    _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
    save_dir: ${paths.run_dir}/tensorboard/
    name: null
    log_graph: false
    default_hp_metric: true
    prefix: ''
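# Top-level run flags; pretrained_ckpt_path is reused below for both the
# tokenizer and the base model weights.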
train: true
test: false
project: mix_v2
max_length: 1024
pretrained_ckpt_path: checkpoints/fish-speech-1.2
tokenizer:
  _target_: transformers.AutoTokenizer.from_pretrained
  pretrained_model_name_or_path: ${pretrained_ckpt_path}
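# Train and validation datasets read the same protobuf shards under
# data/protos and share the tokenizer and max_length defined above.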
train_dataset:
  _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
  proto_files:
  - data/protos
  tokenizer: ${tokenizer}
  causal: true
  max_length: ${max_length}
  use_speaker: false
  interactive_prob: 0.7
val_dataset:
  _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
  proto_files:
  - data/protos
  tokenizer: ${tokenizer}
  causal: true
  max_length: ${max_length}
  use_speaker: false
  interactive_prob: 0.7
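# DataModule wrapping the two datasets; batch_size here is per device (the
# usual Lightning DDP convention), so the effective batch size also scales
# with the number of GPUs and accumulate_grad_batches.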
data:
  _target_: fish_speech.datasets.semantic.SemanticDataModule
  train_dataset: ${train_dataset}
  val_dataset: ${val_dataset}
  num_workers: 4
  batch_size: 4
  tokenizer: ${tokenizer}
  max_length: ${max_length}
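# LightningModule: loads the pretrained transformer from
# ${pretrained_ckpt_path} and applies the LoRA configuration
# (r=8, alpha=16, dropout 0.01).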
model:
  _target_: fish_speech.models.text2semantic.lit_module.TextToSemantic
  model:
    _target_: fish_speech.models.text2semantic.llama.BaseTransformer.from_pretrained
    path: ${pretrained_ckpt_path}
    load_weights: true
    max_length: ${max_length}
    lora_config:
      _target_: fish_speech.models.text2semantic.lora.LoraConfig
      r: 8
      lora_alpha: 16
      lora_dropout: 0.01
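  # Optimizer and scheduler are partial configs (_partial_: true); the
  # LightningModule finishes instantiating them with the model's parameters.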
  optimizer:
    _target_: torch.optim.AdamW
    _partial_: true
    lr: 0.0001
    weight_decay: 0.01
    betas:
    - 0.9
    - 0.95
    eps: 1.0e-05
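  # Constant learning rate after a 50-step warmup.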
  lr_scheduler:
    _target_: torch.optim.lr_scheduler.LambdaLR
    _partial_: true
    lr_lambda:
      _target_: fish_speech.scheduler.get_constant_schedule_with_warmup_lr_lambda
      _partial_: true
      num_warmup_steps: 50