xiaosh9527 committed
Commit c54f21f
Parent(s): 83e4fb5

End of training

Files changed (2)
  1. README.md +3 -1
  2. trainer_state.json +39 -39
README.md CHANGED
@@ -2,6 +2,8 @@
 license: cc-by-nc-4.0
 library_name: peft
 tags:
+- text-to-audio
+- techno_nan
 - generated_from_trainer
 base_model: facebook/musicgen-melody
 model-index:
@@ -14,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # musicgen-melody-lora-techno-nan-colab
 
-This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on an unknown dataset.
+This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on the xiaosh9527/nan_music dataset.
 
 ## Model description
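Since the updated card describes a LoRA adapter (library_name: peft, tag text-to-audio) on top of facebook/musicgen-melody, a usage sketch may help. This is a minimal, hedged example: the adapter repo id xiaosh9527/musicgen-melody-lora-techno-nan-colab is an assumption inferred from the model-index name and commit author, the prompt is a placeholder, and it assumes the adapter was trained with the standard transformers + peft stack.

```python
# Minimal sketch: attach the LoRA adapter to the MusicGen Melody base model
# and generate a short clip from a text prompt.
import scipy.io.wavfile
from transformers import AutoProcessor, MusicgenMelodyForConditionalGeneration
from peft import PeftModel

base_id = "facebook/musicgen-melody"
adapter_id = "xiaosh9527/musicgen-melody-lora-techno-nan-colab"  # assumed repo id

processor = AutoProcessor.from_pretrained(base_id)
base = MusicgenMelodyForConditionalGeneration.from_pretrained(base_id)
sampling_rate = base.config.audio_encoder.sampling_rate  # 32 kHz for MusicGen

model = PeftModel.from_pretrained(base, adapter_id)  # load the LoRA weights

inputs = processor(
    text=["driving techno loop with a heavy kick"],  # placeholder prompt
    padding=True,
    return_tensors="pt",
)
audio = model.generate(**inputs, do_sample=True, max_new_tokens=256)  # ~5 s

scipy.io.wavfile.write("sample.wav", rate=sampling_rate, data=audio[0, 0].numpy())
```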
 
trainer_state.json CHANGED
@@ -10,96 +10,96 @@
   "log_history": [
     {
       "epoch": 0.32,
-      "grad_norm": 1.016900658607483,
-      "learning_rate": 0.00018333333333333334,
-      "loss": 9.5861,
+      "grad_norm": 0.20171859860420227,
+      "learning_rate": 9.166666666666667e-05,
+      "loss": 6.7889,
       "step": 2
     },
     {
       "epoch": 0.64,
-      "grad_norm": 1.7372517585754395,
-      "learning_rate": 0.0001666666666666667,
-      "loss": 9.2056,
+      "grad_norm": 0.2258663922548294,
+      "learning_rate": 8.333333333333334e-05,
+      "loss": 6.8518,
       "step": 4
     },
     {
       "epoch": 0.96,
-      "grad_norm": 2.0075902938842773,
-      "learning_rate": 0.00015000000000000001,
-      "loss": 8.5072,
+      "grad_norm": 0.2266591638326645,
+      "learning_rate": 7.500000000000001e-05,
+      "loss": 6.8562,
       "step": 6
     },
     {
       "epoch": 1.28,
-      "grad_norm": 2.0754647254943848,
-      "learning_rate": 0.00013333333333333334,
-      "loss": 8.0374,
+      "grad_norm": 0.19837072491645813,
+      "learning_rate": 6.666666666666667e-05,
+      "loss": 6.8282,
       "step": 8
     },
     {
       "epoch": 1.6,
-      "grad_norm": 1.2964591979980469,
-      "learning_rate": 0.00011666666666666668,
-      "loss": 7.6874,
+      "grad_norm": 0.17619071900844574,
+      "learning_rate": 5.833333333333334e-05,
+      "loss": 6.8528,
       "step": 10
     },
     {
       "epoch": 1.92,
-      "grad_norm": 0.9893752336502075,
-      "learning_rate": 0.0001,
-      "loss": 7.4668,
+      "grad_norm": 0.12639087438583374,
+      "learning_rate": 5e-05,
+      "loss": 6.8231,
       "step": 12
     },
     {
       "epoch": 2.24,
-      "grad_norm": 0.9852508306503296,
-      "learning_rate": 8.333333333333334e-05,
-      "loss": 7.4002,
+      "grad_norm": 0.13687172532081604,
+      "learning_rate": 4.166666666666667e-05,
+      "loss": 6.7815,
       "step": 14
     },
     {
       "epoch": 2.56,
-      "grad_norm": 0.9375394582748413,
-      "learning_rate": 6.666666666666667e-05,
-      "loss": 7.3329,
+      "grad_norm": 0.18898847699165344,
+      "learning_rate": 3.3333333333333335e-05,
+      "loss": 6.7792,
       "step": 16
     },
     {
       "epoch": 2.88,
-      "grad_norm": 0.8562221527099609,
-      "learning_rate": 5e-05,
-      "loss": 7.2785,
+      "grad_norm": 0.1648869514465332,
+      "learning_rate": 2.5e-05,
+      "loss": 6.8046,
       "step": 18
     },
     {
       "epoch": 3.2,
-      "grad_norm": 0.663221538066864,
-      "learning_rate": 3.3333333333333335e-05,
-      "loss": 7.2907,
+      "grad_norm": 0.16822992265224457,
+      "learning_rate": 1.6666666666666667e-05,
+      "loss": 6.8128,
       "step": 20
     },
     {
       "epoch": 3.52,
-      "grad_norm": 1.024126648902893,
-      "learning_rate": 1.6666666666666667e-05,
-      "loss": 7.2383,
+      "grad_norm": 0.16933274269104004,
+      "learning_rate": 8.333333333333334e-06,
+      "loss": 6.8081,
       "step": 22
     },
     {
       "epoch": 3.84,
-      "grad_norm": 0.7755734920501709,
+      "grad_norm": 0.1729530692100525,
       "learning_rate": 0.0,
-      "loss": 7.2692,
+      "loss": 6.8425,
       "step": 24
     },
     {
       "epoch": 3.84,
       "step": 24,
       "total_flos": 92571267829680.0,
-      "train_loss": 7.858357032140096,
-      "train_runtime": 664.0524,
-      "train_samples_per_second": 0.602,
-      "train_steps_per_second": 0.036
+      "train_loss": 6.819143931070964,
+      "train_runtime": 117.2771,
+      "train_samples_per_second": 3.411,
+      "train_steps_per_second": 0.205
     }
   ],
   "logging_steps": 2,