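# ESPnet2 LM training configuration for a Spanish RNN language model
# over a 64-piece unigram BPE vocabulary (output_dir: exp/lm_train_lm_es_bpe64).
# Annotations below are descriptive comments only; all values are as dumped by training.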
config: conf/train_lm.yaml
print_config: false
log_level: INFO
drop_last_iter: false
dry_run: false
iterator_type: sequence
valid_iterator_type: null
output_dir: exp/lm_train_lm_es_bpe64
ngpu: 0
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: null
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 20
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
- - valid
- loss
- min
keep_nbest_models: 1
nbest_averaging_interval: 0
grad_clip: 5.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
use_matplotlib: true
use_tensorboard: true
create_graph_in_tensorboard: false
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
use_lora: false
save_lora_only: true
lora_conf: {}
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: null
batch_size: 64
valid_batch_size: null
batch_bins: 1000000
valid_batch_bins: null
train_shape_file:
- exp/lm_stats_es_bpe64/train/text_shape.bpe
valid_shape_file:
- exp/lm_stats_es_bpe64/valid/text_shape.bpe
batch_type: folded
valid_batch_type: null
fold_length:
- 150
sort_in_batch: descending
shuffle_within_batch: false
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
chunk_excluded_key_prefixes: []
chunk_default_fs: null
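# Training/validation text sources; each entry is (file path, data name, data type).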
train_data_path_and_name_and_type:
- - dump/raw/lm_train.txt
- text
- text
valid_data_path_and_name_and_type:
- - dump/raw/org/train_dev/text
- text
- text
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
allow_multi_rates: false
valid_max_cache_size: null
exclude_weight_decay: false
exclude_weight_decay_conf: {}
optim: adam
optim_conf:
lr: 0.1
scheduler: null
scheduler_conf: {}
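# Token vocabulary (64 entries): special symbols plus Spanish BPE pieces
# from the unigram-64 SentencePiece model referenced under bpemodel below.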
token_list:
- <blank>
- <unk>
- ▁
- a
- o
- i
- s
- r
- e
- n
- c
- u
- l
- m
- b
- g
- t
- ▁de
- ▁a
- en
- do
- er
- ▁p
- ra
- ta
- te
- h
- ▁que
- p
- ▁la
- ▁el
- ▁es
- to
- d
- da
- es
- ▁no
- os
- y
- ▁y
- ▁ma
- ▁un
- ▁se
- ▁en
- la
- f
- z
- ñ
- '0'
- '4'
- '3'
- w
- '6'
- '8'
- '9'
- '2'
- '1'
- x
- j
- q
- '5'
- '7'
- v
- <sos/eos>
init: null
use_preprocessor: true
token_type: bpe
bpemodel: data/es_token_list/bpe_unigram64/bpe.model
non_linguistic_symbols: null
cleaner: null
g2p: null
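# Language model: sequential RNN LM ("seq_rnn") with 2 layers of 650 units each.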
lm: seq_rnn
lm_conf:
unit: 650
nlayers: 2
model: lm
model_conf: {}
required:
- output_dir
- token_list
version: '202402'
distributed: false