jihong008 committed on
Commit a88b382
1 Parent(s): ea6338d

End of training

Files changed (5)
  1. README.md +6 -1
  2. all_results.json +13 -13
  3. eval_results.json +7 -7
  4. train_results.json +7 -7
  5. trainer_state.json +21 -21
README.md CHANGED
@@ -2,6 +2,8 @@
 license: cc-by-nc-4.0
 library_name: peft
 tags:
+- text-to-audio
+- ylacombe/tiny-punk
 - generated_from_trainer
 base_model: facebook/musicgen-melody
 model-index:
@@ -14,7 +16,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # musicgen-melody-lora-punk
 
-This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on an unknown dataset.
+This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on the YLACOMBE/TINY-PUNK - DEFAULT dataset.
+It achieves the following results on the evaluation set:
+- Loss: 5.0972
+- Clap: 0.0238
 
 ## Model description
 
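The card above does not yet include usage code. Below is a minimal inference sketch, assuming the adapter is published as `jihong008/musicgen-melody-lora-punk` (inferred from the committer and model name, not stated in the diff): load the base checkpoint, attach the LoRA adapter with PEFT, and generate a short clip from a text prompt.

```python
# Sketch only: the adapter repo id and generation settings are assumptions, not this repo's code.
import torch
from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration
from peft import PeftModel

base_id = "facebook/musicgen-melody"
adapter_id = "jihong008/musicgen-melody-lora-punk"  # assumed repo id for this adapter

processor = AutoProcessor.from_pretrained(base_id)
model = MusicgenMelodyForConditionalGeneration.from_pretrained(base_id)
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA weights
model.eval()

inputs = processor(
    text=["fast punk rock with distorted guitars and driving drums"],
    padding=True,
    return_tensors="pt",
)
with torch.no_grad():
    audio = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
# roughly 5 seconds of audio at the model's 32 kHz sampling rate, shape (batch, channels, samples)
```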
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
-    "epoch": 3.0,
-    "eval_clap": -0.018694762140512466,
-    "eval_loss": 4.857634544372559,
-    "eval_runtime": 150.0944,
-    "eval_samples": 8,
-    "eval_samples_per_second": 0.053,
-    "eval_steps_per_second": 0.053,
-    "total_flos": 415739226780.0,
-    "train_loss": 2.6683399379253387,
-    "train_runtime": 15.8841,
-    "train_samples": 5,
-    "train_samples_per_second": 1.259,
-    "train_steps_per_second": 0.252
+    "epoch": 4.0,
+    "eval_clap": 0.023790106177330017,
+    "eval_loss": 5.097151279449463,
+    "eval_runtime": 30.31,
+    "eval_samples": 5,
+    "eval_samples_per_second": 0.165,
+    "eval_steps_per_second": 0.033,
+    "total_flos": 110863793808.0,
+    "train_loss": 1.1294779181480408,
+    "train_runtime": 21.3494,
+    "train_samples": 1,
+    "train_samples_per_second": 0.187,
+    "train_steps_per_second": 0.187
 }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 3.0,
-    "eval_clap": -0.018694762140512466,
-    "eval_loss": 4.857634544372559,
-    "eval_runtime": 150.0944,
-    "eval_samples": 8,
-    "eval_samples_per_second": 0.053,
-    "eval_steps_per_second": 0.053
+    "epoch": 4.0,
+    "eval_clap": 0.023790106177330017,
+    "eval_loss": 5.097151279449463,
+    "eval_runtime": 30.31,
+    "eval_samples": 5,
+    "eval_samples_per_second": 0.165,
+    "eval_steps_per_second": 0.033
 }
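The `eval_clap` value is a CLAP text-audio similarity score between the prompts and the generated audio. The checkpoint and preprocessing used for it are not recorded in this diff; the sketch below shows one common way to compute such a score, assuming the `laion/clap-htsat-unfused` checkpoint and a mono waveform already resampled to CLAP's 48 kHz input rate.

```python
# Sketch only: the CLAP checkpoint and preprocessing here are assumptions, not this run's script.
import torch
import torch.nn.functional as F
from transformers import ClapModel, ClapProcessor

clap_id = "laion/clap-htsat-unfused"  # assumed checkpoint
clap = ClapModel.from_pretrained(clap_id)
clap_processor = ClapProcessor.from_pretrained(clap_id)

def clap_similarity(text: str, waveform) -> float:
    """Cosine similarity between CLAP text and audio embeddings for one clip.

    `waveform` is a 1-D float array at 48 kHz (CLAP's input rate); MusicGen
    outputs 32 kHz audio, so it would need resampling first.
    """
    text_inputs = clap_processor(text=[text], return_tensors="pt", padding=True)
    audio_inputs = clap_processor(audios=[waveform], sampling_rate=48000, return_tensors="pt")
    with torch.no_grad():
        text_emb = clap.get_text_features(**text_inputs)
        audio_emb = clap.get_audio_features(**audio_inputs)
    return F.cosine_similarity(text_emb, audio_emb).item()
```

Cosine similarity ranges from -1 to 1, so the small positive 0.0238 above indicates only weak alignment between prompts and generations.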
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 3.0,
-    "total_flos": 415739226780.0,
-    "train_loss": 2.6683399379253387,
-    "train_runtime": 15.8841,
-    "train_samples": 5,
-    "train_samples_per_second": 1.259,
-    "train_steps_per_second": 0.252
+    "epoch": 4.0,
+    "total_flos": 110863793808.0,
+    "train_loss": 1.1294779181480408,
+    "train_runtime": 21.3494,
+    "train_samples": 1,
+    "train_samples_per_second": 0.187,
+    "train_steps_per_second": 0.187
 }
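The card tags this as a PEFT adapter, but the LoRA configuration itself is not part of this commit. Here is a minimal sketch of how a LoRA adapter is typically attached to the MusicGen Melody model before training; the rank, alpha, dropout, and target modules are illustrative assumptions, not this run's settings.

```python
# Illustrative only: r, lora_alpha, lora_dropout and target_modules are assumptions.
from transformers import MusicgenMelodyForConditionalGeneration
from peft import LoraConfig, get_peft_model

model = MusicgenMelodyForConditionalGeneration.from_pretrained("facebook/musicgen-melody")

lora_config = LoraConfig(
    r=16,                 # assumed adapter rank
    lora_alpha=16,        # assumed scaling factor
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],  # assumed attention projections
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the small LoRA matrices are trained
```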
trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.0,
+  "epoch": 4.0,
   "eval_steps": 25,
   "global_step": 4,
   "is_hyper_param_search": false,
@@ -10,40 +10,40 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm": 0.6173375248908997,
+      "grad_norm": 30085.0234375,
       "learning_rate": 0.00015000000000000001,
-      "loss": 3.628,
+      "loss": 1.1752,
       "step": 1
     },
     {
       "epoch": 2.0,
-      "grad_norm": 0.6236879825592041,
+      "grad_norm": 28943.8359375,
       "learning_rate": 0.0001,
-      "loss": 3.5988,
+      "loss": 1.1398,
       "step": 2
     },
     {
-      "epoch": 2.6666666666666665,
-      "grad_norm": 0.47706061601638794,
+      "epoch": 3.0,
+      "grad_norm": 29536.12890625,
       "learning_rate": 5e-05,
-      "loss": 2.3147,
+      "loss": 1.102,
       "step": 3
     },
     {
-      "epoch": 3.0,
-      "grad_norm": 0.338273286819458,
+      "epoch": 4.0,
+      "grad_norm": 34210.921875,
       "learning_rate": 0.0,
-      "loss": 1.1319,
+      "loss": 1.1008,
       "step": 4
     },
     {
-      "epoch": 3.0,
+      "epoch": 4.0,
       "step": 4,
-      "total_flos": 415739226780.0,
-      "train_loss": 2.6683399379253387,
-      "train_runtime": 15.8841,
-      "train_samples_per_second": 1.259,
-      "train_steps_per_second": 0.252
+      "total_flos": 110863793808.0,
+      "train_loss": 1.1294779181480408,
+      "train_runtime": 21.3494,
+      "train_samples_per_second": 0.187,
+      "train_steps_per_second": 0.187
     }
   ],
   "logging_steps": 1.0,
@@ -57,14 +57,14 @@
         "should_epoch_stop": false,
         "should_evaluate": false,
         "should_log": false,
-        "should_save": true,
-        "should_training_stop": true
+        "should_save": false,
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos": 415739226780.0,
-  "train_batch_size": 2,
+  "total_flos": 110863793808.0,
+  "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
 }
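The learning rates logged above (1.5e-4, 1e-4, 5e-5, 0.0 across the four steps) are consistent with a linear decay schedule from a base learning rate of 2e-4 with no warmup. The small sketch below reproduces those values; the base learning rate and scheduler type are inferred from the log, not read from the training arguments.

```python
# Sketch: base_lr=2e-4 and the linear scheduler are inferred from log_history, not recorded here.
import torch
from transformers import get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=2e-4)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=4)

for step in range(1, 5):
    optimizer.step()      # one optimizer update per logged step
    scheduler.step()
    print(step, scheduler.get_last_lr()[0])
# prints roughly 1.5e-4, 1e-4, 5e-05, 0.0, matching the learning_rate values in log_history
```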