ylacombe committed
Commit b756beb · verified · 1 Parent(s): 339487e

End of training
Files changed (5)
  1. README.md +6 -1
  2. all_results.json +10 -10
  3. eval_results.json +5 -5
  4. train_results.json +5 -5
  5. trainer_state.json +6 -6
README.md CHANGED
@@ -2,6 +2,8 @@
  license: cc-by-nc-4.0
  library_name: peft
  tags:
+ - text-to-audio
+ - ylacombe/tiny-punk
  - generated_from_trainer
  base_model: facebook/musicgen-melody
  model-index:
@@ -14,7 +16,10 @@ should probably proofread and complete it, then remove this comment. -->

  # musicgen-melody-punk-lora

- This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on the YLACOMBE/TINY-PUNK - DEFAULT dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2634
+ - Clap: nan

  ## Model description

all_results.json CHANGED
@@ -1,15 +1,15 @@
  {
  "epoch": 3.764705882352941,
- "eval_clap": 0.3365415632724762,
- "eval_loss": 4.704524993896484,
- "eval_runtime": 147.6762,
+ "eval_clap": NaN,
+ "eval_loss": 0.2634366452693939,
+ "eval_runtime": 177.4407,
  "eval_samples": 5,
- "eval_samples_per_second": 0.034,
- "eval_steps_per_second": 0.034,
- "total_flos": 59875624258368.0,
- "train_loss": 8.819526672363281,
- "train_runtime": 269.5748,
+ "eval_samples_per_second": 0.028,
+ "eval_steps_per_second": 0.028,
+ "total_flos": 35328595626816.0,
+ "train_loss": 6.771849155426025,
+ "train_runtime": 276.9653,
  "train_samples": 33,
- "train_samples_per_second": 0.49,
- "train_steps_per_second": 0.03
+ "train_samples_per_second": 0.477,
+ "train_steps_per_second": 0.029
  }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 3.764705882352941,
- "eval_clap": 0.3365415632724762,
- "eval_loss": 4.704524993896484,
- "eval_runtime": 147.6762,
+ "eval_clap": NaN,
+ "eval_loss": 0.2634366452693939,
+ "eval_runtime": 177.4407,
  "eval_samples": 5,
- "eval_samples_per_second": 0.034,
- "eval_steps_per_second": 0.034
+ "eval_samples_per_second": 0.028,
+ "eval_steps_per_second": 0.028
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 3.764705882352941,
- "total_flos": 59875624258368.0,
- "train_loss": 8.819526672363281,
- "train_runtime": 269.5748,
+ "total_flos": 35328595626816.0,
+ "train_loss": 6.771849155426025,
+ "train_runtime": 276.9653,
  "train_samples": 33,
- "train_samples_per_second": 0.49,
- "train_steps_per_second": 0.03
+ "train_samples_per_second": 0.477,
+ "train_steps_per_second": 0.029
  }
trainer_state.json CHANGED
@@ -11,11 +11,11 @@
  {
  "epoch": 3.764705882352941,
  "step": 8,
- "total_flos": 59875624258368.0,
- "train_loss": 8.819526672363281,
- "train_runtime": 269.5748,
- "train_samples_per_second": 0.49,
- "train_steps_per_second": 0.03
+ "total_flos": 35328595626816.0,
+ "train_loss": 6.771849155426025,
+ "train_runtime": 276.9653,
+ "train_samples_per_second": 0.477,
+ "train_steps_per_second": 0.029
  }
  ],
  "logging_steps": 25,
@@ -23,7 +23,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
- "total_flos": 59875624258368.0,
+ "total_flos": 35328595626816.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null