config: conf/tuning/train_asr_conformer5.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/asr_train_asr_conformer5_raw_bpe150_sp
ngpu: 1
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: 0
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: false
collect_stats: false
write_collected_feats: false
max_epoch: 50
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
-   - valid
    - acc
    - max
keep_nbest_models: 10
nbest_averaging_interval: 0
grad_clip: 3
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: true
log_interval: null
use_matplotlib: true
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: null
batch_size: 20
valid_batch_size: null
batch_bins: 2500000
valid_batch_bins: null
train_shape_file:
- exp/asr_stats_raw_bpe150_sp/train/speech_shape
- exp/asr_stats_raw_bpe150_sp/train/text_shape.bpe
valid_shape_file:
- exp/asr_stats_raw_bpe150_sp/valid/speech_shape
- exp/asr_stats_raw_bpe150_sp/valid/text_shape.bpe
batch_type: numel
valid_batch_type: null
fold_length:
- 80000
- 150
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
-   - dump/raw/train_sp/wav.scp
    - speech
    - sound
-   - dump/raw/train_sp/text
    - text
    - text
valid_data_path_and_name_and_type:
-   - dump/raw/valid/wav.scp
    - speech
    - sound
-   - dump/raw/valid/text
    - text
    - text
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
optim: adam
optim_conf:
    lr: 4.0
scheduler: noamlr
scheduler_conf:
    model_size: 256
    warmup_steps: 25000
token_list:
- <blank>
- <unk>
- ▁
- s
- i
- o
- e
- a
- n
- u
- t
- r
- ▁c
- ▁a
- é
- c
- l
- ▁de
- ▁d
- d
- ▁est
- m
- ▁le
- p
- er
- v
- f
- ▁l
- ▁s
- à
- ▁m
- ▁la
- ▁p
- re
- ▁un
- ▁il
- ▁vous
- nt
- g
- te
- ur
- in
- ▁qu
- il
- ce
- ▁en
- ▁je
- tre
- et
- ez
- h
- le
- ▁pas
- ▁b
- or
- ve
- b
- x
- ▁une
- ▁y
- ▁au
- ▁bien
- ▁vo
- ge
- ir
- ▁t
- ▁on
- ▁oui
- ▁j
- ▁n
- ment
- ▁ma
- ▁tr
- ▁re
- que
- ▁dan
- ▁par
- ▁du
- ▁que
- è
- ont
- ▁ici
- ▁euh
- ▁se
- ▁ne
- ▁pour
- aire
- z
- ▁ave
- j
- ▁nous
- ▁bon
- ▁tout
- ▁mais
- ▁monsieur
- ette
- ▁tou
- ement
- ▁va
- ▁sur
- ê
- ▁médecin
- tion
- rès
- elle
- ▁alors
- ▁sui
- y
- ▁voir
- ▁ou
- ▁allez
- ▁che
- ▁eau
- and
- ait
- ▁comme
- ▁manivelle
- ▁plaît
- ▁cric
- vous
- ▁bonjour
- puis
- ▁peut
- ▁plus
- ▁malade
- ommes
- ▁route
- ▁deux
- ▁alle
- ▁fait
- droit
- ▁aussi
- vien
- ▁hui
- ▁pharmacie
- ▁merci
- ▁heure
- ▁gauche
- ▁camarade
- û
- ô
- w
- œ
- k
- ù
- â
- î
- ç
- q
- <sos/eos>
init: null
input_size: null
ctc_conf:
    dropout_rate: 0.0
    ctc_type: builtin
    reduce: true
    ignore_nan_grad: true
joint_net_conf: null
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1
    length_normalized_loss: false
use_preprocessor: true
token_type: bpe
bpemodel: data/token_list/bpe_unigram150/bpe.model
non_linguistic_symbols: null
cleaner: null
g2p: null
speech_volume_normalize: null
rir_scp: null
rir_apply_prob: 1.0
noise_scp: null
noise_apply_prob: 1.0
noise_db_range: '13_15'
frontend: default
frontend_conf:
    n_fft: 512
    win_length: 400
    hop_length: 160
    fs: 16k
specaug: specaug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2
normalize: global_mvn
normalize_conf:
    stats_file: exp/asr_stats_raw_bpe150_sp/train/feats_stats.npz
preencoder: null
preencoder_conf: {}
encoder: conformer
encoder_conf:
    input_layer: conv2d
    num_blocks: 12
    linear_units: 2048
    dropout_rate: 0.1
    output_size: 256
    attention_heads: 4
    attention_dropout_rate: 0.0
    pos_enc_layer_type: rel_pos
    selfattention_layer_type: rel_selfattn
    activation_type: swish
    macaron_style: true
    use_cnn_module: true
    cnn_module_kernel: 15
postencoder: null
postencoder_conf: {}
decoder: transformer
decoder_conf:
    input_layer: embed
    num_blocks: 6
    linear_units: 2048
    dropout_rate: 0.1
required:
- output_dir
- token_list
version: 0.10.7a1
distributed: false