reach-vb HF staff committed on
Commit
103d46f
1 Parent(s): 40585d3

Upload folder using huggingface_hub

Browse files
gla/gla_130.yaml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ class_path: model.lina.Lina
3
+ init_args:
4
+ n_warmup_steps: 500
5
+ learning_rate: 5e-4
6
+ n_codebook: 1024
7
+ n_special_token_in: 3
8
+ n_special_token_out: 3
9
+ n_txt_vocab: 256
10
+ d_context: 768
11
+ d_model: 768
12
+ quant_layer: [0, 1, 2, 3]
13
+ txt_encoder:
14
+ class_path: model.encoder.TextEncoder
15
+ init_args:
16
+ dim: 768
17
+ heads: 8
18
+ n_layers: 9
19
+ dropout: 0.1
20
+ attentive_rnn:
21
+ class_path: model.gla.AttentiveGLA
22
+ init_args:
23
+ d_model: 768
24
+ d_context: 768
25
+ heads: 4
26
+ dropout_att: 0.2
27
+ dropout: 0.
28
+ n_layer: 6
29
+ blind: True
30
+ d_blind: 128
gla/gla_130_librilight.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:541dd9f05eb99452939e3c2d237c6d94e89436ed18eae0039ede69607fc3d133
3
+ size 524258222
gla/gla_130_librilight_ft_librittsr.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b672cff7cdce2f89e0cc8ba98c0efb80c4aa4b5c178b277b09415d5ae4aa88d3
3
+ size 524262706
gla/gla_60.yaml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ class_path: model.lina.Lina
3
+ init_args:
4
+ n_warmup_steps: 500
5
+ learning_rate: 5e-4
6
+ n_codebook: 1024
7
+ n_special_token_in: 3
8
+ n_special_token_out: 3
9
+ n_txt_vocab: 256
10
+ d_context: 512
11
+ d_model: 512
12
+ quant_layer: [0, 1, 2, 3]
13
+ txt_encoder:
14
+ class_path: model.encoder.TextEncoder
15
+ init_args:
16
+ dim: 512
17
+ heads: 8
18
+ n_layers: 9
19
+ dropout: 0.1
20
+ attentive_rnn:
21
+ class_path: model.gla.AttentiveGLA
22
+ init_args:
23
+ d_model: 512
24
+ d_context: 512
25
+ heads: 4
26
+ dropout_att: 0.2
27
+ dropout: 0.
28
+ n_layer: 6
29
+ blind: True
30
+ d_blind: 128
gla/gla_60_librilight.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3f5aa5b342bac9119bbf3cdf1aea6a6ff496ca3ee25482826f603aa0150da9e
3
+ size 239630918
mamba/mamba_60.yaml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ class_path: model.lina.Lina
3
+ init_args:
4
+ n_warmup_steps: 500
5
+ learning_rate: 5e-4
6
+ n_codebook: 1024
7
+ n_special_token_in: 3
8
+ n_special_token_out: 3
9
+ n_txt_vocab: 256
10
+ d_context: 512
11
+ d_model: 512
12
+ quant_layer: [0, 1, 2, 3]
13
+ txt_encoder:
14
+ class_path: model.encoder.TextEncoder
15
+ init_args:
16
+ dim: 512
17
+ heads: 8
18
+ n_layers: 9
19
+ dropout: 0.1
20
+ attentive_rnn:
21
+ class_path: model.mamba.AttentiveMamba
22
+ init_args:
23
+ d_model: 512
24
+ d_context: 512
25
+ heads: 1
26
+ dropout_att: 0.1
27
+ n_layer: 12
28
+ blind: True
29
+ d_blind: 128
mamba/mamba_60_librilight.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c3d4edc4018a71ecc62359d5b70def8d71ef73c40131d73939a2c447fdc77ec
3
+ size 250636269
rwkv/rwkv6_60.yaml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ class_path: model.lina.Lina
3
+ init_args:
4
+ n_codebook: 1024
5
+ n_special_token_in: 3
6
+ n_special_token_out: 3
7
+ n_txt_vocab: 180
8
+ d_context: 384
9
+ d_model: 512
10
+ quant_layer: [0, 1, 2, 3]
11
+ txt_encoder:
12
+ class_path: model.encoder.TextEncoder
13
+ init_args:
14
+ dim: 512
15
+ heads: 1
16
+ n_layers: 6
17
+ dropout: 0.1
18
+ attentive_rnn:
19
+ class_path: model.rwkv6x.AttentiveRWKV6
20
+ init_args:
21
+ d_model: 512
22
+ d_context: 512
23
+ heads: 1
24
+ dropout_att: 0.2
25
+ n_layer: 3
26
+ blind: True
rwkv/rwkv6_60_libritts.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fdb025f5a7c45958e8a4b83d70f3d26cc6c728027b0419a44569bea430dd712
3
+ size 262986252