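# ESPnet2 LM training configuration (appears to be the config.yaml dumped into the
# experiment directory exp/lm_train_bpe; keys are listed alphabetically).
# It trains a seq_rnn language model on BPE-tokenized text (data/token_list/bpe_unigram30).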
accum_grad: 1
allow_variable_data_keys: false
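# Batching: with batch_type 'folded', batches are built from batch_size and fold_length;
# batch_bins is presumably only relevant for bin-based batch types and unused here.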
batch_bins: 1000000
batch_size: 20
batch_type: folded
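# Checkpoint selection: each entry is (phase, metric, direction); the best checkpoints
# under these criteria are retained (see keep_nbest_models below).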
best_model_criterion:
- - train
  - loss
  - min
- - valid
  - loss
  - min
- - train
  - acc
  - max
- - valid
  - acc
  - max
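# SentencePiece model used to tokenize the text (token_type: bpe; see token_list below).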
bpemodel: data/token_list/bpe_unigram30/bpe.model
chunk_length: 500
chunk_shift_ratio: 0.5
cleaner: null
collect_stats: false
config: null
cudnn_benchmark: false
cudnn_deterministic: true
cudnn_enabled: true
dist_backend: nccl
dist_init_method: env://
dist_launcher: null
dist_master_addr: null
dist_master_port: null
dist_rank: null
dist_world_size: null
distributed: false
dry_run: false
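# Early stopping watches validation loss; it only takes effect when patience is set
# (patience is null here).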
early_stopping_criterion:
- valid
- loss
- min
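# Per-feature maximum lengths used by the 'folded' batch type; batches containing
# longer sequences are reduced in size.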
fold_length:
- 150
g2p: null
grad_clip: 5.0
grad_noise: false
init: null
iterator_type: sequence
keep_nbest_models: 10
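# LM architecture: seq_rnn (sequential RNN LM) with default options (lm_conf is empty).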
lm: seq_rnn
lm_conf: {}
local_rank: 0
log_interval: null
log_level: INFO
max_cache_size: 0.0
max_epoch: 40
model_conf:
  ignore_id: 0
multiple_iterator: false
multiprocessing_distributed: false
ngpu: 1
no_forward_run: false
non_linguistic_symbols: null
num_att_plot: 3
num_cache_chunks: 1024
num_iters_per_epoch: null
num_workers: 1
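# Optimization: Adadelta with default options; no learning-rate scheduler is configured.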
optim: adadelta
optim_conf: {}
output_dir: exp/lm_train_bpe
patience: null
pretrain_key: []
pretrain_path: []
print_config: false
required:
- output_dir
- token_list
resume: true
scheduler: null
scheduler_conf: {}
seed: 0
sort_batch: descending
sort_in_batch: descending
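# Token vocabulary produced by the BPE model above, plus the special symbols
# <blank>, <unk>, and <sos/eos>.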
token_list:
- <blank>
- <unk>
- T
- "\u2581"
- I
- H
- G
- O
- AR
- "\u2581T"
- NE
- E
- EN
- Y
- "\u2581E"
- "\u2581S"
- EVEN
- F
- M
- C
- R
- D
- N
- W
- ENT
- L
- <sos/eos>
token_type: bpe
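# Training data: each entry is (file path, data name, loader type).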
train_data_path_and_name_and_type:
- - dump/raw/srctexts
  - text
  - text
train_dtype: float32
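# Shape files from the stats collection stage (exp/lm_stats), giving per-utterance
# token counts used for batching.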
train_shape_file:
- exp/lm_stats/train/text_shape.bpe
use_preprocessor: true
val_scheduler_criterion:
- valid
- loss
valid_batch_bins: null
valid_batch_size: null
valid_batch_type: null
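# Validation data, in the same (path, name, type) format as the training entries.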
valid_data_path_and_name_and_type:
- - dump/raw/train_dev/text
  - text
  - text
valid_max_cache_size: null
valid_shape_file:
- exp/lm_stats/valid/text_shape.bpe
write_collected_feats: false