Upload config.yaml
Hi, I am Susnato. First of all, I loved your work! I am currently trying to add your model to Hugging Face, so some files, such as config.yaml and tokenizer_model.ckpt, need to be present in this repository. Please add these files. The PR is [here](https://github.com/huggingface/transformers/pull/21785).
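For reference, once the file is in this repository it can be fetched programmatically. A minimal sketch (not part of the pop2piano codebase; the repo id `sweetcocoa/pop2piano` is an assumption):

```python
# Sketch: download config.yaml from the Hub once it has been uploaded.
# Assumption: the repository id is "sweetcocoa/pop2piano".
from huggingface_hub import hf_hub_download

config_path = hf_hub_download(repo_id="sweetcocoa/pop2piano", filename="config.yaml")
print(config_path)  # local cache path of the downloaded file
```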
- config.yaml +61 -0
config.yaml
ADDED
@@ -0,0 +1,61 @@
+project: pop2piano
+dataset:
+  target_length: 256
+  input_length: 1024
+  n_bars: 2
+  sample_rate: 22050
+  use_mel: true
+  mel_is_conditioned: true
+  composer_to_feature_token:
+    composer1: 2052
+    composer2: 2053
+    composer3: 2054
+    composer4: 2055
+    composer5: 2056
+    composer6: 2057
+    composer7: 2058
+    composer8: 2059
+    composer9: 2060
+    composer10: 2061
+    composer11: 2062
+    composer12: 2063
+    composer13: 2064
+    composer14: 2065
+    composer15: 2066
+    composer16: 2067
+    composer17: 2068
+    composer18: 2069
+    composer19: 2070
+    composer20: 2071
+    composer21: 2072
+t5:
+  feed_forward_proj: gated-gelu
+  tie_word_embeddings: false
+  tie_encoder_decoder: false
+  vocab_size: 2400
+  n_positions: 1024
+  relative_attention_num_buckets: 32
+tokenizer:
+  vocab_size:
+    special: 4
+    note: 128
+    velocity: 2
+    time: 100
+training:
+  seed: 3407
+  resume: false
+  offline: false
+  num_gpu: 1
+  max_epochs: 5000
+  accumulate_grad_batches: 1
+  check_val_every_n_epoch: 20
+  find_lr: false
+  optimizer: adafactor
+  version: none
+  lr: 0.001
+  lr_min: 1.0e-06
+  lr_scheduler: false
+  lr_decay: 0.99
+  batch_size: 32
+  num_workers: 32
+  gradient_clip_val: 3.0
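The nesting implied by the keys above can be checked after upload. A minimal sketch using PyYAML (not part of the pop2piano codebase), reading back a few of the values from the diff:

```python
# Sketch: parse the uploaded config.yaml and read a few nested values.
import yaml

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["dataset"]["sample_rate"])                             # 22050
print(cfg["t5"]["vocab_size"])                                   # 2400
print(cfg["dataset"]["composer_to_feature_token"]["composer1"])  # 2052
print(cfg["training"]["optimizer"])                              # adafactor
```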