save_data: run
## Where the vocab(s) will be written
src_vocab: run/vocab/gl-es/bpe.vocab.src
tgt_vocab: run/vocab/gl-es/bpe.vocab.tgt
overwrite: true

# Corpus opts:
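# Each corpus carries a sampling `weight`: during training, examples are
# drawn from the corpora roughly in proportion to these weights, so
# opensub and ccmatrix (180) are seen far more often than ted2020 (10).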
data:
    europarl:
        path_tgt: corpora/europarl/partitions/es_train.txt
        path_src: corpora/europarl_translit/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 120
    opensub:
        path_tgt: corpora/opensub/partitions/es_train.txt
        path_src: corpora/opensub_translit/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 180 
    dgt:
        path_tgt: corpora/dgt/partitions/es_train.txt
        path_src: corpora/dgt_translit/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 18
    cluvi:
        path_tgt: corpora/cluvi/partitions/es_train.txt
        path_src: corpora/cluvi/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 40
    opensub-es-gl:
        path_tgt: corpora/opensub-es-gl/partitions/es_train.txt
        path_src: corpora/opensub-es-gl/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 25
    ted2020:
        path_tgt: corpora/ted2020/partitions/es_train.txt
        path_src: corpora/ted2020/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 10
    corgaback:
        path_tgt: corpora/corgaback/partitions/es_train.txt
        path_src: corpora/corgaback/partitions/gl_train.txt
        transforms: [bpe, filtertoolong]
        weight: 13
    ccmatrix:
        path_tgt: corpora/ccmatrix/es.txt
        path_src: corpora/ccmatrix/gl.txt
        transforms: [bpe, filtertoolong]
        weight: 180
    resto:
        path_tgt: corpora/resto/es.txt
        path_src: corpora/resto/gl.txt
        transforms: [bpe, filtertoolong]
        weight: 120
    opensub_2018:
        path_tgt: corpora/opensub_2018/es.txt
        path_src: corpora/opensub_2018/gl.txt
        transforms: [bpe, filtertoolong]
        weight: 25

        
    valid:
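        # Held-out validation data, scored every valid_steps during training.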
        path_tgt: corpora/partitions/all-es_valid.txt
        path_src: corpora/partitions_translit/all-gl_valid.txt
        transforms: [bpe, filtertoolong]

### Transform-related opts:
#### Subword
tgt_subword_model: ./bpe/es.code
src_subword_model: ./bpe/gl.code
tgt_subword_vocab: ./run/vocab/gl-es/bpe.vocab.tgt
src_subword_vocab: ./run/vocab/gl-es/bpe.vocab.src
src_subword_type: bpe
tgt_subword_type: bpe

src_subword_nbest: 1
src_subword_alpha: 0.0
tgt_subword_nbest: 1
tgt_subword_alpha: 0.0
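# nbest = 1 and alpha = 0.0 disable stochastic subword sampling
# (subword regularization), so segmentation is deterministic.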

## Embeddings
tgt_embeddings: ../embeddings/es.emb.txt
src_embeddings: ../embeddings/gl.emb.txt

## supported types: GloVe, word2vec
embeddings_type: "word2vec"

# word_vec_size needs to match the dimension of the pretrained embeddings
word_vec_size: 512
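# It must also equal rnn_size below, since the transformer uses a
# single model dimension throughout.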


#### Filter
src_seq_length: 150
tgt_seq_length: 150
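# With transforms [bpe, filtertoolong], these limits apply to the
# subword-tokenized sentences; longer pairs are dropped.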

# silently ignore empty lines in the data
skip_empty_level: silent



# General opts
save_model: run/model
keep_checkpoint: 50
save_checkpoint_steps: 10000
average_decay: 0.0005
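# average_decay > 0 keeps an exponential moving average of the
# parameters, which smooths the checkpointed weights.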
seed: 1234
report_every: 1000
train_steps: 200000
valid_steps: 10000

# Batching
queue_size: 10000
bucket_size: 32768
world_size: 1
gpu_ranks: [0]
batch_type: "tokens"
#batch_size: 4096
batch_size: 8192
valid_batch_size: 64
batch_size_multiple: 1
max_generator_batches: 2
accum_count: [4]
accum_steps: [0]
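# Effective batch: 8192 tokens * accum_count 4 = 32,768 tokens per
# optimizer update on a single GPU (world_size 1, gpu_ranks [0]).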

# Optimization
model_dtype: "fp16"
optim: "adam"
learning_rate: 2
#learning_rate: 0.00005
warmup_steps: 8000
decay_method: "noam"
adam_beta2: 0.998
max_grad_norm: 0
label_smoothing: 0.1
param_init: 0
param_init_glorot: true
normalization: "tokens"

# Model
encoder_type: transformer
decoder_type: transformer
position_encoding: true
enc_layers: 6
dec_layers: 6
heads: 8
rnn_size: 512
transformer_ff: 2048
dropout_steps: [0]
dropout: [0.1]
attention_dropout: [0.1]
share_decoder_embeddings: true
share_embeddings: false
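# 6+6 layers, 8 heads, model dim 512, FF 2048: the standard
# Transformer-base setup. Decoder input/output embeddings are tied,
# but src/tgt embeddings are not shared, since Galician and Spanish
# use separate BPE models and vocabularies here.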