binhnx8 committed
Commit
32318f6
1 Parent(s): 9cfdd7e

Upload folder using huggingface_hub

bolero_v1/.hydra/config.yaml ADDED
@@ -0,0 +1,114 @@
+ paths:
+   run_dir: results/${project}
+   ckpt_dir: ${paths.run_dir}/checkpoints
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.run_dir}
+   accelerator: gpu
+   num_nodes: 1
+   devices: auto
+   strategy:
+     _target_: lightning.pytorch.strategies.DDPStrategy
+     process_group_backend: nccl
+   precision: bf16-true
+   check_val_every_n_epoch: null
+   val_check_interval: 1000
+   max_steps: 100000
+   benchmark: true
+   accumulate_grad_batches: 1
+   gradient_clip_val: 1.0
+   gradient_clip_algorithm: norm
+   limit_val_batches: 10
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.ckpt_dir}
+     filename: step_{step:09d}
+     save_last: false
+     save_top_k: 5
+     monitor: step
+     mode: max
+     every_n_epochs: null
+     every_n_train_steps: ${trainer.val_check_interval}
+     auto_insert_metric_name: false
+   model_summary:
+     _target_: lightning.pytorch.callbacks.ModelSummary
+     max_depth: 2
+   learning_rate_monitor:
+     _target_: lightning.pytorch.callbacks.LearningRateMonitor
+     logging_interval: step
+     log_momentum: false
+   grad_norm_monitor:
+     _target_: fish_speech.callbacks.GradNormMonitor
+     norm_type: 2
+     logging_interval: step
+ logger:
+   tensorboard:
+     _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+     save_dir: ${paths.run_dir}/tensorboard/
+     name: null
+     log_graph: false
+     default_hp_metric: true
+     prefix: ''
+ train: true
+ test: false
+ project: mix_v2
+ max_length: 1024
+ pretrained_ckpt_path: checkpoints/fish-speech-1.2
+ tokenizer:
+   _target_: transformers.AutoTokenizer.from_pretrained
+   pretrained_model_name_or_path: ${pretrained_ckpt_path}
+ train_dataset:
+   _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
+   proto_files:
+   - data/protos
+   tokenizer: ${tokenizer}
+   causal: true
+   max_length: ${max_length}
+   use_speaker: false
+   interactive_prob: 0.7
+ val_dataset:
+   _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
+   proto_files:
+   - data/protos
+   tokenizer: ${tokenizer}
+   causal: true
+   max_length: ${max_length}
+   use_speaker: false
+   interactive_prob: 0.7
+ data:
+   _target_: fish_speech.datasets.semantic.SemanticDataModule
+   train_dataset: ${train_dataset}
+   val_dataset: ${val_dataset}
+   num_workers: 4
+   batch_size: 4
+   tokenizer: ${tokenizer}
+   max_length: ${max_length}
+ model:
+   _target_: fish_speech.models.text2semantic.lit_module.TextToSemantic
+   model:
+     _target_: fish_speech.models.text2semantic.llama.BaseTransformer.from_pretrained
+     path: ${pretrained_ckpt_path}
+     load_weights: true
+     max_length: ${max_length}
+     lora_config:
+       _target_: fish_speech.models.text2semantic.lora.LoraConfig
+       r: 8
+       lora_alpha: 16
+       lora_dropout: 0.01
+   optimizer:
+     _target_: torch.optim.AdamW
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.01
+     betas:
+     - 0.9
+     - 0.95
+     eps: 1.0e-05
+   lr_scheduler:
+     _target_: torch.optim.lr_scheduler.LambdaLR
+     _partial_: true
+     lr_lambda:
+       _target_: fish_speech.scheduler.get_constant_schedule_with_warmup_lr_lambda
+       _partial_: true
+       num_warmup_steps: 50
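
Everything in this config is built through Hydra's `_target_` convention: `hydra.utils.instantiate` imports the dotted path and calls it with the remaining keys, while `_partial_: true` yields a `functools.partial` so the optimizer and scheduler can be constructed later, once the model's parameters exist. A minimal, self-contained sketch of that mechanism (the stand-in config below mirrors the `optimizer` block above; the `nn.Linear` is a placeholder, not the real TextToSemantic module):

```python
import torch.nn as nn
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Stand-in config using the same _target_/_partial_ convention as the file above.
cfg = OmegaConf.create({
    "optimizer": {
        "_target_": "torch.optim.AdamW",
        "_partial_": True,
        "lr": 0.0001,
        "weight_decay": 0.01,
        "betas": [0.9, 0.95],
        "eps": 1.0e-05,
    }
})

model = nn.Linear(4, 4)                         # placeholder module
optimizer_factory = instantiate(cfg.optimizer)  # functools.partial(AdamW, lr=0.0001, ...)
optimizer = optimizer_factory(model.parameters())
```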
bolero_v1/.hydra/hydra.yaml ADDED
@@ -0,0 +1,157 @@
+ hydra:
+   run:
+     dir: ${paths.run_dir}
+   sweep:
+     dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][HYDRA] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: simple
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     loggers:
+       logging_example:
+         level: DEBUG
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: simple
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task:
+     - project=mix_v2
+     - +lora@model.model.lora_config=r_8_alpha_16
+   job:
+     name: train
+     chdir: null
+     override_dirname: +lora@model.model.lora_config=r_8_alpha_16,project=mix_v2
+     id: ???
+     num: ???
+     config_name: text2semantic_finetune
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /mnt/f/repo_fish-speech
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /mnt/f/repo_fish-speech/fish_speech/configs
+       schema: file
+       provider: main
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /mnt/f/repo_fish-speech/results/mix_v2
+   choices:
+     lora@model.model.lora_config: r_8_alpha_16
+     hydra/env: default
+     hydra/callbacks: null
+     hydra/job_logging: default
+     hydra/hydra_logging: default
+     hydra/hydra_help: default
+     hydra/help: default
+     hydra/sweeper: basic
+     hydra/launcher: basic
+     hydra/output: default
+   verbose: false
bolero_v1/.hydra/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - project=mix_v2
+ - +lora@model.model.lora_config=r_8_alpha_16
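
These two overrides record how the run was launched. A sketch of re-composing the same config with Hydra's Compose API, assuming the repository layout recorded in hydra.yaml (`fish_speech/configs` holding `text2semantic_finetune.yaml`):

```python
from hydra import compose, initialize

# config_path is resolved relative to the calling file.
with initialize(version_base="1.3", config_path="fish_speech/configs"):
    cfg = compose(
        config_name="text2semantic_finetune",
        overrides=["project=mix_v2", "+lora@model.model.lora_config=r_8_alpha_16"],
    )

print(cfg.paths.run_dir)  # results/mix_v2
```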
bolero_v1/checkpoints/step_000089000.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc1d625486a34dbb7dedbf0f089933a8e220250ab2166100713b1002cfd7d644
+ size 30477245
bolero_v1/checkpoints/step_000090000.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cba730e13f54bfff15393912b8d93fa901b69b0bc400e53e5862bcd0618be16
+ size 30477245
bolero_v1/checkpoints/step_000091000.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:095b4c91fad4f24d8d2125931c3e3866a2f6da8d43c1d688fc7a1cdab3cd729f
+ size 30477245
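
All three `.ckpt` files are Git LFS pointers with identical 30477245-byte payloads, far smaller than a full ~495 M-parameter model would be, which is consistent with the LoRA fine-tune configured above. A hedged sketch of inspecting one after `git lfs pull` (key names assume a standard Lightning checkpoint layout):

```python
import torch

# Load on CPU; requires the LFS object to have been fetched first.
ckpt = torch.load("bolero_v1/checkpoints/step_000090000.ckpt", map_location="cpu")

print(sorted(ckpt.keys()))  # typically includes 'state_dict', 'optimizer_states', ...
lora_keys = [k for k in ckpt["state_dict"] if "lora_A" in k or "lora_B" in k]
print(f"{len(lora_keys)} LoRA tensors in the checkpoint")
```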
bolero_v1/tensorboard/version_0/events.out.tfevents.1720516849.DESKTOP-E8VFO84.25210.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac19bc604658768ed39be9c256ccb3d0f6034d9d50598bd42eb4d878e68276d5
+ size 808923
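
The TensorBoard run can be read programmatically without starting the UI; a sketch using the event-processing API (the scalar tag names are not shown in this commit, so discover them via `Tags()` first):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("bolero_v1/tensorboard/version_0")
acc.Reload()                  # parse the events file
print(acc.Tags()["scalars"])  # list the logged scalar tags

# Then, for some discovered tag name `tag`:
# points = [(e.step, e.value) for e in acc.Scalars(tag)]
```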
bolero_v1/tensorboard/version_0/hparams.yaml ADDED
@@ -0,0 +1,107 @@
+ model:
+   _target_: fish_speech.models.text2semantic.lit_module.TextToSemantic
+   model:
+     _target_: fish_speech.models.text2semantic.llama.BaseTransformer.from_pretrained
+     path: checkpoints/fish-speech-1.2
+     load_weights: true
+     max_length: 1024
+     lora_config:
+       _target_: fish_speech.models.text2semantic.lora.LoraConfig
+       r: 8
+       lora_alpha: 16
+       lora_dropout: 0.01
+   optimizer:
+     _target_: torch.optim.AdamW
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.01
+     betas:
+     - 0.9
+     - 0.95
+     eps: 1.0e-05
+   lr_scheduler:
+     _target_: torch.optim.lr_scheduler.LambdaLR
+     _partial_: true
+     lr_lambda:
+       _target_: fish_speech.scheduler.get_constant_schedule_with_warmup_lr_lambda
+       _partial_: true
+       num_warmup_steps: 50
+ model/params/total: 495286272
+ model/params/trainable: 5017600
+ model/params/non_trainable: 490268672
+ data:
+   _target_: fish_speech.datasets.semantic.SemanticDataModule
+   train_dataset:
+     _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
+     proto_files:
+     - data/protos
+     tokenizer:
+       _target_: transformers.AutoTokenizer.from_pretrained
+       pretrained_model_name_or_path: checkpoints/fish-speech-1.2
+     causal: true
+     max_length: 1024
+     use_speaker: false
+     interactive_prob: 0.7
+   val_dataset:
+     _target_: fish_speech.datasets.semantic.AutoTextSemanticInstructionDataset
+     proto_files:
+     - data/protos
+     tokenizer:
+       _target_: transformers.AutoTokenizer.from_pretrained
+       pretrained_model_name_or_path: checkpoints/fish-speech-1.2
+     causal: true
+     max_length: 1024
+     use_speaker: false
+     interactive_prob: 0.7
+   num_workers: 4
+   batch_size: 4
+   tokenizer:
+     _target_: transformers.AutoTokenizer.from_pretrained
+     pretrained_model_name_or_path: checkpoints/fish-speech-1.2
+   max_length: 1024
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: results/mix_v2
+   accelerator: gpu
+   num_nodes: 1
+   devices: auto
+   strategy:
+     _target_: lightning.pytorch.strategies.DDPStrategy
+     process_group_backend: nccl
+   precision: bf16-true
+   check_val_every_n_epoch: null
+   val_check_interval: 1000
+   max_steps: 100000
+   benchmark: true
+   accumulate_grad_batches: 1
+   gradient_clip_val: 1.0
+   gradient_clip_algorithm: norm
+   limit_val_batches: 10
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: results/mix_v2/checkpoints
+     filename: step_{step:09d}
+     save_last: false
+     save_top_k: 5
+     monitor: step
+     mode: max
+     every_n_epochs: null
+     every_n_train_steps: 1000
+     auto_insert_metric_name: false
+   model_summary:
+     _target_: lightning.pytorch.callbacks.ModelSummary
+     max_depth: 2
+   learning_rate_monitor:
+     _target_: lightning.pytorch.callbacks.LearningRateMonitor
+     logging_interval: step
+     log_momentum: false
+   grad_norm_monitor:
+     _target_: fish_speech.callbacks.GradNormMonitor
+     norm_type: 2
+     logging_interval: step
+ extras: null
+ task_name: null
+ tags: null
+ ckpt_path: null
+ seed: null
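
The `model/params/*` entries quantify how light the LoRA adapter is: 5,017,600 trainable parameters out of 495,286,272 total, roughly 1%. A quick arithmetic check:

```python
total, trainable, frozen = 495_286_272, 5_017_600, 490_268_672

assert trainable + frozen == total                      # counts are self-consistent
print(f"trainable fraction: {trainable / total:.2%}")   # -> 1.01%
```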
bolero_v1/train.log ADDED
@@ -0,0 +1,105 @@
+ [2024-07-09 15:38:01,843][__main__][INFO] - [rank: 0] Instantiating datamodule <fish_speech.datasets.semantic.SemanticDataModule>
+ [2024-07-09 15:38:02,439][datasets][INFO] - PyTorch version 2.3.1 available.
+ [2024-07-09 15:38:02,753][__main__][INFO] - [rank: 0] Instantiating model <fish_speech.models.text2semantic.lit_module.TextToSemantic>
+ [2024-07-09 15:38:02,781][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Override max_seq_len to 1024
+ [2024-07-09 15:38:02,813][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Loading model from checkpoints/mix_v1, config: DualARModelArgs(model_type='dual_ar', vocab_size=32000, n_layer=24, n_head=16, dim=1024, intermediate_size=4096, n_local_heads=2, head_dim=64, rope_base=1000000.0, norm_eps=1e-06, max_seq_len=1024, dropout=0.1, tie_word_embeddings=False, attention_qkv_bias=False, codebook_size=1024, num_codebooks=4, use_gradient_checkpointing=True, initializer_range=0.02, n_fast_layer=4)
+ [2024-07-09 15:38:08,926][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] LoRA setup: LoraConfig(r=8, lora_alpha=16, lora_dropout=0.01)
+ [2024-07-09 15:38:08,990][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Loaded weights with error: _IncompatibleKeys(missing_keys=['embeddings.lora_A', 'embeddings.lora_B', 'codebook_embeddings.lora_A', 'codebook_embeddings.lora_B', 'layers.0.attention.wqkv.lora_A', 'layers.0.attention.wqkv.lora_B', 'layers.0.attention.wo.lora_A', 'layers.0.attention.wo.lora_B', 'layers.0.feed_forward.w1.lora_A', 'layers.0.feed_forward.w1.lora_B', 'layers.0.feed_forward.w3.lora_A', 'layers.0.feed_forward.w3.lora_B', 'layers.0.feed_forward.w2.lora_A', 'layers.0.feed_forward.w2.lora_B', 'layers.1.attention.wqkv.lora_A', 'layers.1.attention.wqkv.lora_B', 'layers.1.attention.wo.lora_A', 'layers.1.attention.wo.lora_B', 'layers.1.feed_forward.w1.lora_A', 'layers.1.feed_forward.w1.lora_B', 'layers.1.feed_forward.w3.lora_A', 'layers.1.feed_forward.w3.lora_B', 'layers.1.feed_forward.w2.lora_A', 'layers.1.feed_forward.w2.lora_B', 'layers.2.attention.wqkv.lora_A', 'layers.2.attention.wqkv.lora_B', 'layers.2.attention.wo.lora_A', 'layers.2.attention.wo.lora_B', 'layers.2.feed_forward.w1.lora_A', 'layers.2.feed_forward.w1.lora_B', 'layers.2.feed_forward.w3.lora_A', 'layers.2.feed_forward.w3.lora_B', 'layers.2.feed_forward.w2.lora_A', 'layers.2.feed_forward.w2.lora_B', 'layers.3.attention.wqkv.lora_A', 'layers.3.attention.wqkv.lora_B', 'layers.3.attention.wo.lora_A', 'layers.3.attention.wo.lora_B', 'layers.3.feed_forward.w1.lora_A', 'layers.3.feed_forward.w1.lora_B', 'layers.3.feed_forward.w3.lora_A', 'layers.3.feed_forward.w3.lora_B', 'layers.3.feed_forward.w2.lora_A', 'layers.3.feed_forward.w2.lora_B', 'layers.4.attention.wqkv.lora_A', 'layers.4.attention.wqkv.lora_B', 'layers.4.attention.wo.lora_A', 'layers.4.attention.wo.lora_B', 'layers.4.feed_forward.w1.lora_A', 'layers.4.feed_forward.w1.lora_B', 'layers.4.feed_forward.w3.lora_A', 'layers.4.feed_forward.w3.lora_B', 'layers.4.feed_forward.w2.lora_A', 'layers.4.feed_forward.w2.lora_B', 'layers.5.attention.wqkv.lora_A', 'layers.5.attention.wqkv.lora_B', 'layers.5.attention.wo.lora_A', 'layers.5.attention.wo.lora_B', 'layers.5.feed_forward.w1.lora_A', 'layers.5.feed_forward.w1.lora_B', 'layers.5.feed_forward.w3.lora_A', 'layers.5.feed_forward.w3.lora_B', 'layers.5.feed_forward.w2.lora_A', 'layers.5.feed_forward.w2.lora_B', 'layers.6.attention.wqkv.lora_A', 'layers.6.attention.wqkv.lora_B', 'layers.6.attention.wo.lora_A', 'layers.6.attention.wo.lora_B', 'layers.6.feed_forward.w1.lora_A', 'layers.6.feed_forward.w1.lora_B', 'layers.6.feed_forward.w3.lora_A', 'layers.6.feed_forward.w3.lora_B', 'layers.6.feed_forward.w2.lora_A', 'layers.6.feed_forward.w2.lora_B', 'layers.7.attention.wqkv.lora_A', 'layers.7.attention.wqkv.lora_B', 'layers.7.attention.wo.lora_A', 'layers.7.attention.wo.lora_B', 'layers.7.feed_forward.w1.lora_A', 'layers.7.feed_forward.w1.lora_B', 'layers.7.feed_forward.w3.lora_A', 'layers.7.feed_forward.w3.lora_B', 'layers.7.feed_forward.w2.lora_A', 'layers.7.feed_forward.w2.lora_B', 'layers.8.attention.wqkv.lora_A', 'layers.8.attention.wqkv.lora_B', 'layers.8.attention.wo.lora_A', 'layers.8.attention.wo.lora_B', 'layers.8.feed_forward.w1.lora_A', 'layers.8.feed_forward.w1.lora_B', 'layers.8.feed_forward.w3.lora_A', 'layers.8.feed_forward.w3.lora_B', 'layers.8.feed_forward.w2.lora_A', 'layers.8.feed_forward.w2.lora_B', 'layers.9.attention.wqkv.lora_A', 'layers.9.attention.wqkv.lora_B', 'layers.9.attention.wo.lora_A', 'layers.9.attention.wo.lora_B', 'layers.9.feed_forward.w1.lora_A', 'layers.9.feed_forward.w1.lora_B', 
'layers.9.feed_forward.w3.lora_A', 'layers.9.feed_forward.w3.lora_B', 'layers.9.feed_forward.w2.lora_A', 'layers.9.feed_forward.w2.lora_B', 'layers.10.attention.wqkv.lora_A', 'layers.10.attention.wqkv.lora_B', 'layers.10.attention.wo.lora_A', 'layers.10.attention.wo.lora_B', 'layers.10.feed_forward.w1.lora_A', 'layers.10.feed_forward.w1.lora_B', 'layers.10.feed_forward.w3.lora_A', 'layers.10.feed_forward.w3.lora_B', 'layers.10.feed_forward.w2.lora_A', 'layers.10.feed_forward.w2.lora_B', 'layers.11.attention.wqkv.lora_A', 'layers.11.attention.wqkv.lora_B', 'layers.11.attention.wo.lora_A', 'layers.11.attention.wo.lora_B', 'layers.11.feed_forward.w1.lora_A', 'layers.11.feed_forward.w1.lora_B', 'layers.11.feed_forward.w3.lora_A', 'layers.11.feed_forward.w3.lora_B', 'layers.11.feed_forward.w2.lora_A', 'layers.11.feed_forward.w2.lora_B', 'layers.12.attention.wqkv.lora_A', 'layers.12.attention.wqkv.lora_B', 'layers.12.attention.wo.lora_A', 'layers.12.attention.wo.lora_B', 'layers.12.feed_forward.w1.lora_A', 'layers.12.feed_forward.w1.lora_B', 'layers.12.feed_forward.w3.lora_A', 'layers.12.feed_forward.w3.lora_B', 'layers.12.feed_forward.w2.lora_A', 'layers.12.feed_forward.w2.lora_B', 'layers.13.attention.wqkv.lora_A', 'layers.13.attention.wqkv.lora_B', 'layers.13.attention.wo.lora_A', 'layers.13.attention.wo.lora_B', 'layers.13.feed_forward.w1.lora_A', 'layers.13.feed_forward.w1.lora_B', 'layers.13.feed_forward.w3.lora_A', 'layers.13.feed_forward.w3.lora_B', 'layers.13.feed_forward.w2.lora_A', 'layers.13.feed_forward.w2.lora_B', 'layers.14.attention.wqkv.lora_A', 'layers.14.attention.wqkv.lora_B', 'layers.14.attention.wo.lora_A', 'layers.14.attention.wo.lora_B', 'layers.14.feed_forward.w1.lora_A', 'layers.14.feed_forward.w1.lora_B', 'layers.14.feed_forward.w3.lora_A', 'layers.14.feed_forward.w3.lora_B', 'layers.14.feed_forward.w2.lora_A', 'layers.14.feed_forward.w2.lora_B', 'layers.15.attention.wqkv.lora_A', 'layers.15.attention.wqkv.lora_B', 'layers.15.attention.wo.lora_A', 'layers.15.attention.wo.lora_B', 'layers.15.feed_forward.w1.lora_A', 'layers.15.feed_forward.w1.lora_B', 'layers.15.feed_forward.w3.lora_A', 'layers.15.feed_forward.w3.lora_B', 'layers.15.feed_forward.w2.lora_A', 'layers.15.feed_forward.w2.lora_B', 'layers.16.attention.wqkv.lora_A', 'layers.16.attention.wqkv.lora_B', 'layers.16.attention.wo.lora_A', 'layers.16.attention.wo.lora_B', 'layers.16.feed_forward.w1.lora_A', 'layers.16.feed_forward.w1.lora_B', 'layers.16.feed_forward.w3.lora_A', 'layers.16.feed_forward.w3.lora_B', 'layers.16.feed_forward.w2.lora_A', 'layers.16.feed_forward.w2.lora_B', 'layers.17.attention.wqkv.lora_A', 'layers.17.attention.wqkv.lora_B', 'layers.17.attention.wo.lora_A', 'layers.17.attention.wo.lora_B', 'layers.17.feed_forward.w1.lora_A', 'layers.17.feed_forward.w1.lora_B', 'layers.17.feed_forward.w3.lora_A', 'layers.17.feed_forward.w3.lora_B', 'layers.17.feed_forward.w2.lora_A', 'layers.17.feed_forward.w2.lora_B', 'layers.18.attention.wqkv.lora_A', 'layers.18.attention.wqkv.lora_B', 'layers.18.attention.wo.lora_A', 'layers.18.attention.wo.lora_B', 'layers.18.feed_forward.w1.lora_A', 'layers.18.feed_forward.w1.lora_B', 'layers.18.feed_forward.w3.lora_A', 'layers.18.feed_forward.w3.lora_B', 'layers.18.feed_forward.w2.lora_A', 'layers.18.feed_forward.w2.lora_B', 'layers.19.attention.wqkv.lora_A', 'layers.19.attention.wqkv.lora_B', 'layers.19.attention.wo.lora_A', 'layers.19.attention.wo.lora_B', 'layers.19.feed_forward.w1.lora_A', 'layers.19.feed_forward.w1.lora_B', 'layers.19.feed_forward.w3.lora_A', 
'layers.19.feed_forward.w3.lora_B', 'layers.19.feed_forward.w2.lora_A', 'layers.19.feed_forward.w2.lora_B', 'layers.20.attention.wqkv.lora_A', 'layers.20.attention.wqkv.lora_B', 'layers.20.attention.wo.lora_A', 'layers.20.attention.wo.lora_B', 'layers.20.feed_forward.w1.lora_A', 'layers.20.feed_forward.w1.lora_B', 'layers.20.feed_forward.w3.lora_A', 'layers.20.feed_forward.w3.lora_B', 'layers.20.feed_forward.w2.lora_A', 'layers.20.feed_forward.w2.lora_B', 'layers.21.attention.wqkv.lora_A', 'layers.21.attention.wqkv.lora_B', 'layers.21.attention.wo.lora_A', 'layers.21.attention.wo.lora_B', 'layers.21.feed_forward.w1.lora_A', 'layers.21.feed_forward.w1.lora_B', 'layers.21.feed_forward.w3.lora_A', 'layers.21.feed_forward.w3.lora_B', 'layers.21.feed_forward.w2.lora_A', 'layers.21.feed_forward.w2.lora_B', 'layers.22.attention.wqkv.lora_A', 'layers.22.attention.wqkv.lora_B', 'layers.22.attention.wo.lora_A', 'layers.22.attention.wo.lora_B', 'layers.22.feed_forward.w1.lora_A', 'layers.22.feed_forward.w1.lora_B', 'layers.22.feed_forward.w3.lora_A', 'layers.22.feed_forward.w3.lora_B', 'layers.22.feed_forward.w2.lora_A', 'layers.22.feed_forward.w2.lora_B', 'layers.23.attention.wqkv.lora_A', 'layers.23.attention.wqkv.lora_B', 'layers.23.attention.wo.lora_A', 'layers.23.attention.wo.lora_B', 'layers.23.feed_forward.w1.lora_A', 'layers.23.feed_forward.w1.lora_B', 'layers.23.feed_forward.w3.lora_A', 'layers.23.feed_forward.w3.lora_B', 'layers.23.feed_forward.w2.lora_A', 'layers.23.feed_forward.w2.lora_B', 'output.lora_A', 'output.lora_B', 'fast_embeddings.lora_A', 'fast_embeddings.lora_B', 'fast_layers.0.attention.wqkv.lora_A', 'fast_layers.0.attention.wqkv.lora_B', 'fast_layers.0.attention.wo.lora_A', 'fast_layers.0.attention.wo.lora_B', 'fast_layers.0.feed_forward.w1.lora_A', 'fast_layers.0.feed_forward.w1.lora_B', 'fast_layers.0.feed_forward.w3.lora_A', 'fast_layers.0.feed_forward.w3.lora_B', 'fast_layers.0.feed_forward.w2.lora_A', 'fast_layers.0.feed_forward.w2.lora_B', 'fast_layers.1.attention.wqkv.lora_A', 'fast_layers.1.attention.wqkv.lora_B', 'fast_layers.1.attention.wo.lora_A', 'fast_layers.1.attention.wo.lora_B', 'fast_layers.1.feed_forward.w1.lora_A', 'fast_layers.1.feed_forward.w1.lora_B', 'fast_layers.1.feed_forward.w3.lora_A', 'fast_layers.1.feed_forward.w3.lora_B', 'fast_layers.1.feed_forward.w2.lora_A', 'fast_layers.1.feed_forward.w2.lora_B', 'fast_layers.2.attention.wqkv.lora_A', 'fast_layers.2.attention.wqkv.lora_B', 'fast_layers.2.attention.wo.lora_A', 'fast_layers.2.attention.wo.lora_B', 'fast_layers.2.feed_forward.w1.lora_A', 'fast_layers.2.feed_forward.w1.lora_B', 'fast_layers.2.feed_forward.w3.lora_A', 'fast_layers.2.feed_forward.w3.lora_B', 'fast_layers.2.feed_forward.w2.lora_A', 'fast_layers.2.feed_forward.w2.lora_B', 'fast_layers.3.attention.wqkv.lora_A', 'fast_layers.3.attention.wqkv.lora_B', 'fast_layers.3.attention.wo.lora_A', 'fast_layers.3.attention.wo.lora_B', 'fast_layers.3.feed_forward.w1.lora_A', 'fast_layers.3.feed_forward.w1.lora_B', 'fast_layers.3.feed_forward.w3.lora_A', 'fast_layers.3.feed_forward.w3.lora_B', 'fast_layers.3.feed_forward.w2.lora_A', 'fast_layers.3.feed_forward.w2.lora_B', 'fast_output.lora_A', 'fast_output.lora_B'], unexpected_keys=[])
+ [2024-07-09 15:38:08,994][__main__][INFO] - [rank: 0] Instantiating callbacks...
+ [2024-07-09 15:38:08,994][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.ModelCheckpoint>
+ [2024-07-09 15:38:08,997][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.ModelSummary>
+ [2024-07-09 15:38:08,997][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.LearningRateMonitor>
+ [2024-07-09 15:38:08,997][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <fish_speech.callbacks.GradNormMonitor>
+ [2024-07-09 15:38:09,006][__main__][INFO] - [rank: 0] Instantiating loggers...
+ [2024-07-09 15:38:09,006][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating logger <lightning.pytorch.loggers.tensorboard.TensorBoardLogger>
+ [2024-07-09 15:38:09,018][__main__][INFO] - [rank: 0] Instantiating trainer <lightning.pytorch.trainer.Trainer>
+ [2024-07-09 15:38:09,064][__main__][INFO] - [rank: 0] Logging hyperparameters!
+ [2024-07-09 15:38:09,135][__main__][INFO] - [rank: 0] Starting training!
+ [2024-07-09 15:38:37,548][fish_speech.models.text2semantic.lit_module][INFO] - [rank: 0] Set weight decay: 0.005 for 432 parameters
+ [2024-07-09 15:38:37,548][fish_speech.models.text2semantic.lit_module][INFO] - [rank: 0] Set weight decay: 0.0 for 61 parameters
+ [2024-07-09 15:38:37,885][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:38:37,885][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:38:37,885][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:38:37,885][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:38:37,978][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 2 groups of data
+ [2024-07-09 15:38:37,980][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 1 groups of data
+ [2024-07-09 15:38:38,053][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 5 groups of data
+ [2024-07-09 15:38:38,069][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 4 groups of data
+ [2024-07-09 15:38:39,388][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:38:39,388][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:38:39,388][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:38:39,389][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:38:39,477][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 1 groups of data
+ [2024-07-09 15:38:39,482][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 2 groups of data
+ [2024-07-09 15:38:39,555][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 5 groups of data
+ [2024-07-09 15:38:39,557][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 4 groups of data
+ [2024-07-09 15:43:03,202][__main__][INFO] - [rank: 0] Instantiating datamodule <fish_speech.datasets.semantic.SemanticDataModule>
+ [2024-07-09 15:43:03,439][datasets][INFO] - PyTorch version 2.3.1 available.
+ [2024-07-09 15:43:03,659][__main__][INFO] - [rank: 0] Instantiating model <fish_speech.models.text2semantic.lit_module.TextToSemantic>
+ [2024-07-09 15:43:03,688][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Override max_seq_len to 1024
+ [2024-07-09 15:43:03,723][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Loading model from checkpoints/mix_v1, config: DualARModelArgs(model_type='dual_ar', vocab_size=32000, n_layer=24, n_head=16, dim=1024, intermediate_size=4096, n_local_heads=2, head_dim=64, rope_base=1000000.0, norm_eps=1e-06, max_seq_len=1024, dropout=0.1, tie_word_embeddings=False, attention_qkv_bias=False, codebook_size=1024, num_codebooks=4, use_gradient_checkpointing=True, initializer_range=0.02, n_fast_layer=4)
+ [2024-07-09 15:43:09,799][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] LoRA setup: LoraConfig(r=8, lora_alpha=16, lora_dropout=0.01)
+ [2024-07-09 15:43:09,830][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Loaded weights with error: _IncompatibleKeys(missing_keys=['embeddings.lora_A', 'embeddings.lora_B', 'codebook_embeddings.lora_A', 'codebook_embeddings.lora_B', 'layers.0.attention.wqkv.lora_A', 'layers.0.attention.wqkv.lora_B', 'layers.0.attention.wo.lora_A', 'layers.0.attention.wo.lora_B', 'layers.0.feed_forward.w1.lora_A', 'layers.0.feed_forward.w1.lora_B', 'layers.0.feed_forward.w3.lora_A', 'layers.0.feed_forward.w3.lora_B', 'layers.0.feed_forward.w2.lora_A', 'layers.0.feed_forward.w2.lora_B', 'layers.1.attention.wqkv.lora_A', 'layers.1.attention.wqkv.lora_B', 'layers.1.attention.wo.lora_A', 'layers.1.attention.wo.lora_B', 'layers.1.feed_forward.w1.lora_A', 'layers.1.feed_forward.w1.lora_B', 'layers.1.feed_forward.w3.lora_A', 'layers.1.feed_forward.w3.lora_B', 'layers.1.feed_forward.w2.lora_A', 'layers.1.feed_forward.w2.lora_B', 'layers.2.attention.wqkv.lora_A', 'layers.2.attention.wqkv.lora_B', 'layers.2.attention.wo.lora_A', 'layers.2.attention.wo.lora_B', 'layers.2.feed_forward.w1.lora_A', 'layers.2.feed_forward.w1.lora_B', 'layers.2.feed_forward.w3.lora_A', 'layers.2.feed_forward.w3.lora_B', 'layers.2.feed_forward.w2.lora_A', 'layers.2.feed_forward.w2.lora_B', 'layers.3.attention.wqkv.lora_A', 'layers.3.attention.wqkv.lora_B', 'layers.3.attention.wo.lora_A', 'layers.3.attention.wo.lora_B', 'layers.3.feed_forward.w1.lora_A', 'layers.3.feed_forward.w1.lora_B', 'layers.3.feed_forward.w3.lora_A', 'layers.3.feed_forward.w3.lora_B', 'layers.3.feed_forward.w2.lora_A', 'layers.3.feed_forward.w2.lora_B', 'layers.4.attention.wqkv.lora_A', 'layers.4.attention.wqkv.lora_B', 'layers.4.attention.wo.lora_A', 'layers.4.attention.wo.lora_B', 'layers.4.feed_forward.w1.lora_A', 'layers.4.feed_forward.w1.lora_B', 'layers.4.feed_forward.w3.lora_A', 'layers.4.feed_forward.w3.lora_B', 'layers.4.feed_forward.w2.lora_A', 'layers.4.feed_forward.w2.lora_B', 'layers.5.attention.wqkv.lora_A', 'layers.5.attention.wqkv.lora_B', 'layers.5.attention.wo.lora_A', 'layers.5.attention.wo.lora_B', 'layers.5.feed_forward.w1.lora_A', 'layers.5.feed_forward.w1.lora_B', 'layers.5.feed_forward.w3.lora_A', 'layers.5.feed_forward.w3.lora_B', 'layers.5.feed_forward.w2.lora_A', 'layers.5.feed_forward.w2.lora_B', 'layers.6.attention.wqkv.lora_A', 'layers.6.attention.wqkv.lora_B', 'layers.6.attention.wo.lora_A', 'layers.6.attention.wo.lora_B', 'layers.6.feed_forward.w1.lora_A', 'layers.6.feed_forward.w1.lora_B', 'layers.6.feed_forward.w3.lora_A', 'layers.6.feed_forward.w3.lora_B', 'layers.6.feed_forward.w2.lora_A', 'layers.6.feed_forward.w2.lora_B', 'layers.7.attention.wqkv.lora_A', 'layers.7.attention.wqkv.lora_B', 'layers.7.attention.wo.lora_A', 'layers.7.attention.wo.lora_B', 'layers.7.feed_forward.w1.lora_A', 'layers.7.feed_forward.w1.lora_B', 'layers.7.feed_forward.w3.lora_A', 'layers.7.feed_forward.w3.lora_B', 'layers.7.feed_forward.w2.lora_A', 'layers.7.feed_forward.w2.lora_B', 'layers.8.attention.wqkv.lora_A', 'layers.8.attention.wqkv.lora_B', 'layers.8.attention.wo.lora_A', 'layers.8.attention.wo.lora_B', 'layers.8.feed_forward.w1.lora_A', 'layers.8.feed_forward.w1.lora_B', 'layers.8.feed_forward.w3.lora_A', 'layers.8.feed_forward.w3.lora_B', 'layers.8.feed_forward.w2.lora_A', 'layers.8.feed_forward.w2.lora_B', 'layers.9.attention.wqkv.lora_A', 'layers.9.attention.wqkv.lora_B', 'layers.9.attention.wo.lora_A', 'layers.9.attention.wo.lora_B', 'layers.9.feed_forward.w1.lora_A', 'layers.9.feed_forward.w1.lora_B', 
'layers.9.feed_forward.w3.lora_A', 'layers.9.feed_forward.w3.lora_B', 'layers.9.feed_forward.w2.lora_A', 'layers.9.feed_forward.w2.lora_B', 'layers.10.attention.wqkv.lora_A', 'layers.10.attention.wqkv.lora_B', 'layers.10.attention.wo.lora_A', 'layers.10.attention.wo.lora_B', 'layers.10.feed_forward.w1.lora_A', 'layers.10.feed_forward.w1.lora_B', 'layers.10.feed_forward.w3.lora_A', 'layers.10.feed_forward.w3.lora_B', 'layers.10.feed_forward.w2.lora_A', 'layers.10.feed_forward.w2.lora_B', 'layers.11.attention.wqkv.lora_A', 'layers.11.attention.wqkv.lora_B', 'layers.11.attention.wo.lora_A', 'layers.11.attention.wo.lora_B', 'layers.11.feed_forward.w1.lora_A', 'layers.11.feed_forward.w1.lora_B', 'layers.11.feed_forward.w3.lora_A', 'layers.11.feed_forward.w3.lora_B', 'layers.11.feed_forward.w2.lora_A', 'layers.11.feed_forward.w2.lora_B', 'layers.12.attention.wqkv.lora_A', 'layers.12.attention.wqkv.lora_B', 'layers.12.attention.wo.lora_A', 'layers.12.attention.wo.lora_B', 'layers.12.feed_forward.w1.lora_A', 'layers.12.feed_forward.w1.lora_B', 'layers.12.feed_forward.w3.lora_A', 'layers.12.feed_forward.w3.lora_B', 'layers.12.feed_forward.w2.lora_A', 'layers.12.feed_forward.w2.lora_B', 'layers.13.attention.wqkv.lora_A', 'layers.13.attention.wqkv.lora_B', 'layers.13.attention.wo.lora_A', 'layers.13.attention.wo.lora_B', 'layers.13.feed_forward.w1.lora_A', 'layers.13.feed_forward.w1.lora_B', 'layers.13.feed_forward.w3.lora_A', 'layers.13.feed_forward.w3.lora_B', 'layers.13.feed_forward.w2.lora_A', 'layers.13.feed_forward.w2.lora_B', 'layers.14.attention.wqkv.lora_A', 'layers.14.attention.wqkv.lora_B', 'layers.14.attention.wo.lora_A', 'layers.14.attention.wo.lora_B', 'layers.14.feed_forward.w1.lora_A', 'layers.14.feed_forward.w1.lora_B', 'layers.14.feed_forward.w3.lora_A', 'layers.14.feed_forward.w3.lora_B', 'layers.14.feed_forward.w2.lora_A', 'layers.14.feed_forward.w2.lora_B', 'layers.15.attention.wqkv.lora_A', 'layers.15.attention.wqkv.lora_B', 'layers.15.attention.wo.lora_A', 'layers.15.attention.wo.lora_B', 'layers.15.feed_forward.w1.lora_A', 'layers.15.feed_forward.w1.lora_B', 'layers.15.feed_forward.w3.lora_A', 'layers.15.feed_forward.w3.lora_B', 'layers.15.feed_forward.w2.lora_A', 'layers.15.feed_forward.w2.lora_B', 'layers.16.attention.wqkv.lora_A', 'layers.16.attention.wqkv.lora_B', 'layers.16.attention.wo.lora_A', 'layers.16.attention.wo.lora_B', 'layers.16.feed_forward.w1.lora_A', 'layers.16.feed_forward.w1.lora_B', 'layers.16.feed_forward.w3.lora_A', 'layers.16.feed_forward.w3.lora_B', 'layers.16.feed_forward.w2.lora_A', 'layers.16.feed_forward.w2.lora_B', 'layers.17.attention.wqkv.lora_A', 'layers.17.attention.wqkv.lora_B', 'layers.17.attention.wo.lora_A', 'layers.17.attention.wo.lora_B', 'layers.17.feed_forward.w1.lora_A', 'layers.17.feed_forward.w1.lora_B', 'layers.17.feed_forward.w3.lora_A', 'layers.17.feed_forward.w3.lora_B', 'layers.17.feed_forward.w2.lora_A', 'layers.17.feed_forward.w2.lora_B', 'layers.18.attention.wqkv.lora_A', 'layers.18.attention.wqkv.lora_B', 'layers.18.attention.wo.lora_A', 'layers.18.attention.wo.lora_B', 'layers.18.feed_forward.w1.lora_A', 'layers.18.feed_forward.w1.lora_B', 'layers.18.feed_forward.w3.lora_A', 'layers.18.feed_forward.w3.lora_B', 'layers.18.feed_forward.w2.lora_A', 'layers.18.feed_forward.w2.lora_B', 'layers.19.attention.wqkv.lora_A', 'layers.19.attention.wqkv.lora_B', 'layers.19.attention.wo.lora_A', 'layers.19.attention.wo.lora_B', 'layers.19.feed_forward.w1.lora_A', 'layers.19.feed_forward.w1.lora_B', 'layers.19.feed_forward.w3.lora_A', 
'layers.19.feed_forward.w3.lora_B', 'layers.19.feed_forward.w2.lora_A', 'layers.19.feed_forward.w2.lora_B', 'layers.20.attention.wqkv.lora_A', 'layers.20.attention.wqkv.lora_B', 'layers.20.attention.wo.lora_A', 'layers.20.attention.wo.lora_B', 'layers.20.feed_forward.w1.lora_A', 'layers.20.feed_forward.w1.lora_B', 'layers.20.feed_forward.w3.lora_A', 'layers.20.feed_forward.w3.lora_B', 'layers.20.feed_forward.w2.lora_A', 'layers.20.feed_forward.w2.lora_B', 'layers.21.attention.wqkv.lora_A', 'layers.21.attention.wqkv.lora_B', 'layers.21.attention.wo.lora_A', 'layers.21.attention.wo.lora_B', 'layers.21.feed_forward.w1.lora_A', 'layers.21.feed_forward.w1.lora_B', 'layers.21.feed_forward.w3.lora_A', 'layers.21.feed_forward.w3.lora_B', 'layers.21.feed_forward.w2.lora_A', 'layers.21.feed_forward.w2.lora_B', 'layers.22.attention.wqkv.lora_A', 'layers.22.attention.wqkv.lora_B', 'layers.22.attention.wo.lora_A', 'layers.22.attention.wo.lora_B', 'layers.22.feed_forward.w1.lora_A', 'layers.22.feed_forward.w1.lora_B', 'layers.22.feed_forward.w3.lora_A', 'layers.22.feed_forward.w3.lora_B', 'layers.22.feed_forward.w2.lora_A', 'layers.22.feed_forward.w2.lora_B', 'layers.23.attention.wqkv.lora_A', 'layers.23.attention.wqkv.lora_B', 'layers.23.attention.wo.lora_A', 'layers.23.attention.wo.lora_B', 'layers.23.feed_forward.w1.lora_A', 'layers.23.feed_forward.w1.lora_B', 'layers.23.feed_forward.w3.lora_A', 'layers.23.feed_forward.w3.lora_B', 'layers.23.feed_forward.w2.lora_A', 'layers.23.feed_forward.w2.lora_B', 'output.lora_A', 'output.lora_B', 'fast_embeddings.lora_A', 'fast_embeddings.lora_B', 'fast_layers.0.attention.wqkv.lora_A', 'fast_layers.0.attention.wqkv.lora_B', 'fast_layers.0.attention.wo.lora_A', 'fast_layers.0.attention.wo.lora_B', 'fast_layers.0.feed_forward.w1.lora_A', 'fast_layers.0.feed_forward.w1.lora_B', 'fast_layers.0.feed_forward.w3.lora_A', 'fast_layers.0.feed_forward.w3.lora_B', 'fast_layers.0.feed_forward.w2.lora_A', 'fast_layers.0.feed_forward.w2.lora_B', 'fast_layers.1.attention.wqkv.lora_A', 'fast_layers.1.attention.wqkv.lora_B', 'fast_layers.1.attention.wo.lora_A', 'fast_layers.1.attention.wo.lora_B', 'fast_layers.1.feed_forward.w1.lora_A', 'fast_layers.1.feed_forward.w1.lora_B', 'fast_layers.1.feed_forward.w3.lora_A', 'fast_layers.1.feed_forward.w3.lora_B', 'fast_layers.1.feed_forward.w2.lora_A', 'fast_layers.1.feed_forward.w2.lora_B', 'fast_layers.2.attention.wqkv.lora_A', 'fast_layers.2.attention.wqkv.lora_B', 'fast_layers.2.attention.wo.lora_A', 'fast_layers.2.attention.wo.lora_B', 'fast_layers.2.feed_forward.w1.lora_A', 'fast_layers.2.feed_forward.w1.lora_B', 'fast_layers.2.feed_forward.w3.lora_A', 'fast_layers.2.feed_forward.w3.lora_B', 'fast_layers.2.feed_forward.w2.lora_A', 'fast_layers.2.feed_forward.w2.lora_B', 'fast_layers.3.attention.wqkv.lora_A', 'fast_layers.3.attention.wqkv.lora_B', 'fast_layers.3.attention.wo.lora_A', 'fast_layers.3.attention.wo.lora_B', 'fast_layers.3.feed_forward.w1.lora_A', 'fast_layers.3.feed_forward.w1.lora_B', 'fast_layers.3.feed_forward.w3.lora_A', 'fast_layers.3.feed_forward.w3.lora_B', 'fast_layers.3.feed_forward.w2.lora_A', 'fast_layers.3.feed_forward.w2.lora_B', 'fast_output.lora_A', 'fast_output.lora_B'], unexpected_keys=[])
+ [2024-07-09 15:43:09,834][__main__][INFO] - [rank: 0] Instantiating callbacks...
+ [2024-07-09 15:43:09,834][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.ModelCheckpoint>
+ [2024-07-09 15:43:09,837][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.ModelSummary>
+ [2024-07-09 15:43:09,837][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.LearningRateMonitor>
+ [2024-07-09 15:43:09,838][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <fish_speech.callbacks.GradNormMonitor>
+ [2024-07-09 15:43:09,846][__main__][INFO] - [rank: 0] Instantiating loggers...
+ [2024-07-09 15:43:09,846][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating logger <lightning.pytorch.loggers.tensorboard.TensorBoardLogger>
+ [2024-07-09 15:43:09,851][__main__][INFO] - [rank: 0] Instantiating trainer <lightning.pytorch.trainer.Trainer>
+ [2024-07-09 15:43:09,895][__main__][INFO] - [rank: 0] Logging hyperparameters!
+ [2024-07-09 15:43:09,926][__main__][INFO] - [rank: 0] Starting training!
+ [2024-07-09 15:43:24,580][fish_speech.models.text2semantic.lit_module][INFO] - [rank: 0] Set weight decay: 0.005 for 432 parameters
+ [2024-07-09 15:43:24,581][fish_speech.models.text2semantic.lit_module][INFO] - [rank: 0] Set weight decay: 0.0 for 61 parameters
+ [2024-07-09 15:43:24,890][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:43:24,890][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:43:24,890][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:43:24,890][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:43:24,986][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 2 groups of data
+ [2024-07-09 15:43:25,009][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 1 groups of data
+ [2024-07-09 15:43:25,048][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 5 groups of data
+ [2024-07-09 15:43:25,063][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 4 groups of data
+ [2024-07-09 15:43:25,922][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:43:25,937][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:43:25,937][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 15:43:25,937][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 15:43:26,020][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 1 groups of data
+ [2024-07-09 15:43:26,024][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 2 groups of data
+ [2024-07-09 15:43:26,073][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 5 groups of data
+ [2024-07-09 15:43:26,098][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 4 groups of data
+ [2024-07-09 16:20:42,530][__main__][INFO] - [rank: 0] Instantiating datamodule <fish_speech.datasets.semantic.SemanticDataModule>
+ [2024-07-09 16:20:43,123][datasets][INFO] - PyTorch version 2.3.1 available.
+ [2024-07-09 16:20:43,419][__main__][INFO] - [rank: 0] Instantiating model <fish_speech.models.text2semantic.lit_module.TextToSemantic>
+ [2024-07-09 16:20:43,452][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Override max_seq_len to 1024
+ [2024-07-09 16:20:43,483][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Loading model from checkpoints/fish-speech-1.2, config: DualARModelArgs(model_type='dual_ar', vocab_size=32000, n_layer=24, n_head=16, dim=1024, intermediate_size=4096, n_local_heads=2, head_dim=64, rope_base=1000000.0, norm_eps=1e-06, max_seq_len=1024, dropout=0.1, tie_word_embeddings=False, attention_qkv_bias=False, codebook_size=1024, num_codebooks=4, use_gradient_checkpointing=True, initializer_range=0.02, n_fast_layer=4)
+ [2024-07-09 16:20:49,542][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] LoRA setup: LoraConfig(r=8, lora_alpha=16, lora_dropout=0.01)
+ [2024-07-09 16:20:49,572][fish_speech.models.text2semantic.llama][INFO] - [rank: 0] Loaded weights with error: _IncompatibleKeys(missing_keys=['embeddings.lora_A', 'embeddings.lora_B', 'codebook_embeddings.lora_A', 'codebook_embeddings.lora_B', 'layers.0.attention.wqkv.lora_A', 'layers.0.attention.wqkv.lora_B', 'layers.0.attention.wo.lora_A', 'layers.0.attention.wo.lora_B', 'layers.0.feed_forward.w1.lora_A', 'layers.0.feed_forward.w1.lora_B', 'layers.0.feed_forward.w3.lora_A', 'layers.0.feed_forward.w3.lora_B', 'layers.0.feed_forward.w2.lora_A', 'layers.0.feed_forward.w2.lora_B', 'layers.1.attention.wqkv.lora_A', 'layers.1.attention.wqkv.lora_B', 'layers.1.attention.wo.lora_A', 'layers.1.attention.wo.lora_B', 'layers.1.feed_forward.w1.lora_A', 'layers.1.feed_forward.w1.lora_B', 'layers.1.feed_forward.w3.lora_A', 'layers.1.feed_forward.w3.lora_B', 'layers.1.feed_forward.w2.lora_A', 'layers.1.feed_forward.w2.lora_B', 'layers.2.attention.wqkv.lora_A', 'layers.2.attention.wqkv.lora_B', 'layers.2.attention.wo.lora_A', 'layers.2.attention.wo.lora_B', 'layers.2.feed_forward.w1.lora_A', 'layers.2.feed_forward.w1.lora_B', 'layers.2.feed_forward.w3.lora_A', 'layers.2.feed_forward.w3.lora_B', 'layers.2.feed_forward.w2.lora_A', 'layers.2.feed_forward.w2.lora_B', 'layers.3.attention.wqkv.lora_A', 'layers.3.attention.wqkv.lora_B', 'layers.3.attention.wo.lora_A', 'layers.3.attention.wo.lora_B', 'layers.3.feed_forward.w1.lora_A', 'layers.3.feed_forward.w1.lora_B', 'layers.3.feed_forward.w3.lora_A', 'layers.3.feed_forward.w3.lora_B', 'layers.3.feed_forward.w2.lora_A', 'layers.3.feed_forward.w2.lora_B', 'layers.4.attention.wqkv.lora_A', 'layers.4.attention.wqkv.lora_B', 'layers.4.attention.wo.lora_A', 'layers.4.attention.wo.lora_B', 'layers.4.feed_forward.w1.lora_A', 'layers.4.feed_forward.w1.lora_B', 'layers.4.feed_forward.w3.lora_A', 'layers.4.feed_forward.w3.lora_B', 'layers.4.feed_forward.w2.lora_A', 'layers.4.feed_forward.w2.lora_B', 'layers.5.attention.wqkv.lora_A', 'layers.5.attention.wqkv.lora_B', 'layers.5.attention.wo.lora_A', 'layers.5.attention.wo.lora_B', 'layers.5.feed_forward.w1.lora_A', 'layers.5.feed_forward.w1.lora_B', 'layers.5.feed_forward.w3.lora_A', 'layers.5.feed_forward.w3.lora_B', 'layers.5.feed_forward.w2.lora_A', 'layers.5.feed_forward.w2.lora_B', 'layers.6.attention.wqkv.lora_A', 'layers.6.attention.wqkv.lora_B', 'layers.6.attention.wo.lora_A', 'layers.6.attention.wo.lora_B', 'layers.6.feed_forward.w1.lora_A', 'layers.6.feed_forward.w1.lora_B', 'layers.6.feed_forward.w3.lora_A', 'layers.6.feed_forward.w3.lora_B', 'layers.6.feed_forward.w2.lora_A', 'layers.6.feed_forward.w2.lora_B', 'layers.7.attention.wqkv.lora_A', 'layers.7.attention.wqkv.lora_B', 'layers.7.attention.wo.lora_A', 'layers.7.attention.wo.lora_B', 'layers.7.feed_forward.w1.lora_A', 'layers.7.feed_forward.w1.lora_B', 'layers.7.feed_forward.w3.lora_A', 'layers.7.feed_forward.w3.lora_B', 'layers.7.feed_forward.w2.lora_A', 'layers.7.feed_forward.w2.lora_B', 'layers.8.attention.wqkv.lora_A', 'layers.8.attention.wqkv.lora_B', 'layers.8.attention.wo.lora_A', 'layers.8.attention.wo.lora_B', 'layers.8.feed_forward.w1.lora_A', 'layers.8.feed_forward.w1.lora_B', 'layers.8.feed_forward.w3.lora_A', 'layers.8.feed_forward.w3.lora_B', 'layers.8.feed_forward.w2.lora_A', 'layers.8.feed_forward.w2.lora_B', 'layers.9.attention.wqkv.lora_A', 'layers.9.attention.wqkv.lora_B', 'layers.9.attention.wo.lora_A', 'layers.9.attention.wo.lora_B', 'layers.9.feed_forward.w1.lora_A', 'layers.9.feed_forward.w1.lora_B', 
'layers.9.feed_forward.w3.lora_A', 'layers.9.feed_forward.w3.lora_B', 'layers.9.feed_forward.w2.lora_A', 'layers.9.feed_forward.w2.lora_B', 'layers.10.attention.wqkv.lora_A', 'layers.10.attention.wqkv.lora_B', 'layers.10.attention.wo.lora_A', 'layers.10.attention.wo.lora_B', 'layers.10.feed_forward.w1.lora_A', 'layers.10.feed_forward.w1.lora_B', 'layers.10.feed_forward.w3.lora_A', 'layers.10.feed_forward.w3.lora_B', 'layers.10.feed_forward.w2.lora_A', 'layers.10.feed_forward.w2.lora_B', 'layers.11.attention.wqkv.lora_A', 'layers.11.attention.wqkv.lora_B', 'layers.11.attention.wo.lora_A', 'layers.11.attention.wo.lora_B', 'layers.11.feed_forward.w1.lora_A', 'layers.11.feed_forward.w1.lora_B', 'layers.11.feed_forward.w3.lora_A', 'layers.11.feed_forward.w3.lora_B', 'layers.11.feed_forward.w2.lora_A', 'layers.11.feed_forward.w2.lora_B', 'layers.12.attention.wqkv.lora_A', 'layers.12.attention.wqkv.lora_B', 'layers.12.attention.wo.lora_A', 'layers.12.attention.wo.lora_B', 'layers.12.feed_forward.w1.lora_A', 'layers.12.feed_forward.w1.lora_B', 'layers.12.feed_forward.w3.lora_A', 'layers.12.feed_forward.w3.lora_B', 'layers.12.feed_forward.w2.lora_A', 'layers.12.feed_forward.w2.lora_B', 'layers.13.attention.wqkv.lora_A', 'layers.13.attention.wqkv.lora_B', 'layers.13.attention.wo.lora_A', 'layers.13.attention.wo.lora_B', 'layers.13.feed_forward.w1.lora_A', 'layers.13.feed_forward.w1.lora_B', 'layers.13.feed_forward.w3.lora_A', 'layers.13.feed_forward.w3.lora_B', 'layers.13.feed_forward.w2.lora_A', 'layers.13.feed_forward.w2.lora_B', 'layers.14.attention.wqkv.lora_A', 'layers.14.attention.wqkv.lora_B', 'layers.14.attention.wo.lora_A', 'layers.14.attention.wo.lora_B', 'layers.14.feed_forward.w1.lora_A', 'layers.14.feed_forward.w1.lora_B', 'layers.14.feed_forward.w3.lora_A', 'layers.14.feed_forward.w3.lora_B', 'layers.14.feed_forward.w2.lora_A', 'layers.14.feed_forward.w2.lora_B', 'layers.15.attention.wqkv.lora_A', 'layers.15.attention.wqkv.lora_B', 'layers.15.attention.wo.lora_A', 'layers.15.attention.wo.lora_B', 'layers.15.feed_forward.w1.lora_A', 'layers.15.feed_forward.w1.lora_B', 'layers.15.feed_forward.w3.lora_A', 'layers.15.feed_forward.w3.lora_B', 'layers.15.feed_forward.w2.lora_A', 'layers.15.feed_forward.w2.lora_B', 'layers.16.attention.wqkv.lora_A', 'layers.16.attention.wqkv.lora_B', 'layers.16.attention.wo.lora_A', 'layers.16.attention.wo.lora_B', 'layers.16.feed_forward.w1.lora_A', 'layers.16.feed_forward.w1.lora_B', 'layers.16.feed_forward.w3.lora_A', 'layers.16.feed_forward.w3.lora_B', 'layers.16.feed_forward.w2.lora_A', 'layers.16.feed_forward.w2.lora_B', 'layers.17.attention.wqkv.lora_A', 'layers.17.attention.wqkv.lora_B', 'layers.17.attention.wo.lora_A', 'layers.17.attention.wo.lora_B', 'layers.17.feed_forward.w1.lora_A', 'layers.17.feed_forward.w1.lora_B', 'layers.17.feed_forward.w3.lora_A', 'layers.17.feed_forward.w3.lora_B', 'layers.17.feed_forward.w2.lora_A', 'layers.17.feed_forward.w2.lora_B', 'layers.18.attention.wqkv.lora_A', 'layers.18.attention.wqkv.lora_B', 'layers.18.attention.wo.lora_A', 'layers.18.attention.wo.lora_B', 'layers.18.feed_forward.w1.lora_A', 'layers.18.feed_forward.w1.lora_B', 'layers.18.feed_forward.w3.lora_A', 'layers.18.feed_forward.w3.lora_B', 'layers.18.feed_forward.w2.lora_A', 'layers.18.feed_forward.w2.lora_B', 'layers.19.attention.wqkv.lora_A', 'layers.19.attention.wqkv.lora_B', 'layers.19.attention.wo.lora_A', 'layers.19.attention.wo.lora_B', 'layers.19.feed_forward.w1.lora_A', 'layers.19.feed_forward.w1.lora_B', 'layers.19.feed_forward.w3.lora_A', 
'layers.19.feed_forward.w3.lora_B', 'layers.19.feed_forward.w2.lora_A', 'layers.19.feed_forward.w2.lora_B', 'layers.20.attention.wqkv.lora_A', 'layers.20.attention.wqkv.lora_B', 'layers.20.attention.wo.lora_A', 'layers.20.attention.wo.lora_B', 'layers.20.feed_forward.w1.lora_A', 'layers.20.feed_forward.w1.lora_B', 'layers.20.feed_forward.w3.lora_A', 'layers.20.feed_forward.w3.lora_B', 'layers.20.feed_forward.w2.lora_A', 'layers.20.feed_forward.w2.lora_B', 'layers.21.attention.wqkv.lora_A', 'layers.21.attention.wqkv.lora_B', 'layers.21.attention.wo.lora_A', 'layers.21.attention.wo.lora_B', 'layers.21.feed_forward.w1.lora_A', 'layers.21.feed_forward.w1.lora_B', 'layers.21.feed_forward.w3.lora_A', 'layers.21.feed_forward.w3.lora_B', 'layers.21.feed_forward.w2.lora_A', 'layers.21.feed_forward.w2.lora_B', 'layers.22.attention.wqkv.lora_A', 'layers.22.attention.wqkv.lora_B', 'layers.22.attention.wo.lora_A', 'layers.22.attention.wo.lora_B', 'layers.22.feed_forward.w1.lora_A', 'layers.22.feed_forward.w1.lora_B', 'layers.22.feed_forward.w3.lora_A', 'layers.22.feed_forward.w3.lora_B', 'layers.22.feed_forward.w2.lora_A', 'layers.22.feed_forward.w2.lora_B', 'layers.23.attention.wqkv.lora_A', 'layers.23.attention.wqkv.lora_B', 'layers.23.attention.wo.lora_A', 'layers.23.attention.wo.lora_B', 'layers.23.feed_forward.w1.lora_A', 'layers.23.feed_forward.w1.lora_B', 'layers.23.feed_forward.w3.lora_A', 'layers.23.feed_forward.w3.lora_B', 'layers.23.feed_forward.w2.lora_A', 'layers.23.feed_forward.w2.lora_B', 'output.lora_A', 'output.lora_B', 'fast_embeddings.lora_A', 'fast_embeddings.lora_B', 'fast_layers.0.attention.wqkv.lora_A', 'fast_layers.0.attention.wqkv.lora_B', 'fast_layers.0.attention.wo.lora_A', 'fast_layers.0.attention.wo.lora_B', 'fast_layers.0.feed_forward.w1.lora_A', 'fast_layers.0.feed_forward.w1.lora_B', 'fast_layers.0.feed_forward.w3.lora_A', 'fast_layers.0.feed_forward.w3.lora_B', 'fast_layers.0.feed_forward.w2.lora_A', 'fast_layers.0.feed_forward.w2.lora_B', 'fast_layers.1.attention.wqkv.lora_A', 'fast_layers.1.attention.wqkv.lora_B', 'fast_layers.1.attention.wo.lora_A', 'fast_layers.1.attention.wo.lora_B', 'fast_layers.1.feed_forward.w1.lora_A', 'fast_layers.1.feed_forward.w1.lora_B', 'fast_layers.1.feed_forward.w3.lora_A', 'fast_layers.1.feed_forward.w3.lora_B', 'fast_layers.1.feed_forward.w2.lora_A', 'fast_layers.1.feed_forward.w2.lora_B', 'fast_layers.2.attention.wqkv.lora_A', 'fast_layers.2.attention.wqkv.lora_B', 'fast_layers.2.attention.wo.lora_A', 'fast_layers.2.attention.wo.lora_B', 'fast_layers.2.feed_forward.w1.lora_A', 'fast_layers.2.feed_forward.w1.lora_B', 'fast_layers.2.feed_forward.w3.lora_A', 'fast_layers.2.feed_forward.w3.lora_B', 'fast_layers.2.feed_forward.w2.lora_A', 'fast_layers.2.feed_forward.w2.lora_B', 'fast_layers.3.attention.wqkv.lora_A', 'fast_layers.3.attention.wqkv.lora_B', 'fast_layers.3.attention.wo.lora_A', 'fast_layers.3.attention.wo.lora_B', 'fast_layers.3.feed_forward.w1.lora_A', 'fast_layers.3.feed_forward.w1.lora_B', 'fast_layers.3.feed_forward.w3.lora_A', 'fast_layers.3.feed_forward.w3.lora_B', 'fast_layers.3.feed_forward.w2.lora_A', 'fast_layers.3.feed_forward.w2.lora_B', 'fast_output.lora_A', 'fast_output.lora_B'], unexpected_keys=[])
+ [2024-07-09 16:20:49,581][__main__][INFO] - [rank: 0] Instantiating callbacks...
+ [2024-07-09 16:20:49,582][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.ModelCheckpoint>
+ [2024-07-09 16:20:49,584][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.ModelSummary>
+ [2024-07-09 16:20:49,585][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <lightning.pytorch.callbacks.LearningRateMonitor>
+ [2024-07-09 16:20:49,585][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating callback <fish_speech.callbacks.GradNormMonitor>
+ [2024-07-09 16:20:49,603][__main__][INFO] - [rank: 0] Instantiating loggers...
+ [2024-07-09 16:20:49,603][fish_speech.utils.instantiators][INFO] - [rank: 0] Instantiating logger <lightning.pytorch.loggers.tensorboard.TensorBoardLogger>
+ [2024-07-09 16:20:49,610][__main__][INFO] - [rank: 0] Instantiating trainer <lightning.pytorch.trainer.Trainer>
+ [2024-07-09 16:20:49,655][__main__][INFO] - [rank: 0] Logging hyperparameters!
+ [2024-07-09 16:20:49,720][__main__][INFO] - [rank: 0] Starting training!
+ [2024-07-09 16:21:04,418][fish_speech.models.text2semantic.lit_module][INFO] - [rank: 0] Set weight decay: 0.01 for 432 parameters
+ [2024-07-09 16:21:04,418][fish_speech.models.text2semantic.lit_module][INFO] - [rank: 0] Set weight decay: 0.0 for 61 parameters
+ [2024-07-09 16:21:04,720][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 16:21:04,721][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 16:21:04,720][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 16:21:04,720][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 16:21:04,812][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 2 groups of data
+ [2024-07-09 16:21:04,843][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 1 groups of data
+ [2024-07-09 16:21:04,883][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 5 groups of data
+ [2024-07-09 16:21:04,891][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 4 groups of data
+ [2024-07-09 16:21:05,948][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 16:21:05,948][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 16:21:05,948][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 2 / 3 files
+ [2024-07-09 16:21:05,948][fish_speech.datasets.semantic][INFO] - [rank: 0] Reading 1 / 3 files
+ [2024-07-09 16:21:06,037][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 1 groups of data
+ [2024-07-09 16:21:06,040][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 2 groups of data
+ [2024-07-09 16:21:06,105][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 5 groups of data
+ [2024-07-09 16:21:06,110][fish_speech.datasets.semantic][INFO] - [rank: 0] Read total 4 groups of data
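
Each of the three runs logs `Loaded weights with error: _IncompatibleKeys(missing_keys=[...], unexpected_keys=[])`. That is expected rather than a failure: the pretrained checkpoint contains no adapter weights, so after a non-strict `load_state_dict` every missing key is a freshly initialized `lora_A`/`lora_B` tensor. A hedged sketch of the sanity check this implies (names are illustrative):

```python
def assert_only_lora_missing(incompatible) -> None:
    """incompatible: the _IncompatibleKeys returned by load_state_dict(strict=False)."""
    non_lora = [
        k for k in incompatible.missing_keys
        if not (k.endswith("lora_A") or k.endswith("lora_B"))
    ]
    assert not non_lora, f"base weights unexpectedly missing: {non_lora}"
    assert not incompatible.unexpected_keys, incompatible.unexpected_keys
```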