jihong008 committed on
Commit ea6338d
1 Parent(s): 418035e

Model save

README.md CHANGED
@@ -2,8 +2,6 @@
 license: cc-by-nc-4.0
 library_name: peft
 tags:
-- text-to-audio
-- ylacombe/tiny-punk
 - generated_from_trainer
 base_model: facebook/musicgen-melody
 model-index:
@@ -14,13 +12,9 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/229461195/huggingface/runs/euqah3uh)
 # musicgen-melody-lora-punk
 
-This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on the YLACOMBE/TINY-PUNK - DEFAULT dataset.
-It achieves the following results on the evaluation set:
-- Loss: 4.8576
-- Clap: -0.0187
+This model is a fine-tuned version of [facebook/musicgen-melody](https://huggingface.co/facebook/musicgen-melody) on an unknown dataset.
 
 ## Model description
 
@@ -40,11 +34,11 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size: 2
-- eval_batch_size: 1
+- train_batch_size: 16
+- eval_batch_size: 8
 - seed: 456
 - gradient_accumulation_steps: 8
-- total_train_batch_size: 16
+- total_train_batch_size: 128
 - optimizer: Adam with betas=(0.9,0.99) and epsilon=1e-08
 - lr_scheduler_type: linear
 - num_epochs: 4.0
@@ -57,7 +51,7 @@ The following hyperparameters were used during training:
 ### Framework versions
 
 - PEFT 0.11.1
-- Transformers 4.42.0.dev0
-- Pytorch 2.3.0+cu121
-- Datasets 2.19.1
+- Transformers 4.41.2
+- Pytorch 2.3.1+cu121
+- Datasets 2.19.2
 - Tokenizers 0.19.1
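
The hyperparameter changes above raise the effective batch size from 2 × 8 = 16 to 16 × 8 = 128 examples per optimizer step (per-device batch size times gradient accumulation steps). As a rough, non-authoritative sketch, the updated values would map onto `transformers.Seq2SeqTrainingArguments` roughly as follows; the `output_dir` is an assumption, and the actual training script is not part of this commit:

```python
from transformers import Seq2SeqTrainingArguments

# Sketch only: mirrors the hyperparameters listed in the updated README.
# "musicgen-melody-lora-punk" as output_dir is an assumption.
training_args = Seq2SeqTrainingArguments(
    output_dir="musicgen-melody-lora-punk",
    learning_rate=2e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=8,  # 16 * 8 = 128 examples per optimizer step
    seed=456,
    num_train_epochs=4.0,
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.99,
    adam_epsilon=1e-8,
)
```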
adapter_config.json CHANGED
@@ -23,22 +23,22 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
+    "lm_heads.3",
+    "embed_tokens.3",
     "enc_to_dec_proj",
-    "embed_tokens.0",
-    "embed_tokens.1",
     "out_proj",
+    "embed_tokens.1",
+    "q_proj",
+    "v_proj",
+    "lm_heads.0",
+    "lm_heads.2",
     "k_proj",
-    "embed_tokens.2",
-    "fc2",
     "audio_enc_to_dec_proj",
-    "embed_tokens.3",
-    "q_proj",
-    "lm_heads.3",
     "fc1",
-    "lm_heads.2",
-    "lm_heads.1",
-    "lm_heads.0"
+    "embed_tokens.0",
+    "fc2",
+    "embed_tokens.2",
+    "lm_heads.1"
   ],
   "task_type": null,
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26645f3f84341a09d4def575aadedd1c5802e0f9bf10c4c51f5cf7c1c3e62269
+oid sha256:2077f283bdb33ec1aa7357d3956c53a7ed8a5e1a436f59e2039f57b402615798
 size 87103456
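
The adapter weights are stored through Git LFS, so only the pointer file changes here: the `oid` records the SHA-256 of the new object while the size stays at 87103456 bytes. For illustration only (not part of the repo), a locally downloaded copy can be checked against the new pointer as below; `verify_lfs_object` is a hypothetical helper name:

```python
import hashlib

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Check that a local file matches a Git LFS pointer's oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_sha256 and size == expected_size

# Example usage with the values from the new pointer above:
# verify_lfs_object(
#     "adapter_model.safetensors",
#     "2077f283bdb33ec1aa7357d3956c53a7ed8a5e1a436f59e2039f57b402615798",
#     87103456,
# )
```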
config.json CHANGED
@@ -295,5 +295,5 @@
     "vocab_size": 32128
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.42.0.dev0"
+  "transformers_version": "4.41.2"
 }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a79373b974d4c635c296d2fa0ac6290c00cb0e8a357a0bf906d72f554d94851
-size 5304
+oid sha256:9e8a403a0c088d52c06ba1f81d35013c82284c4f340d50697d9dd2041f60d62c
+size 5240