config: conf/train_asr_transformer.yaml
print_config: false
log_level: INFO
drop_last_iter: false
dry_run: false
iterator_type: sequence
valid_iterator_type: null
output_dir: exp/asr_train_asr_transformer_raw_es_bpe64_sp
ngpu: 0
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: null
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 20
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
-   - valid
    - acc
    - max
keep_nbest_models: 10
nbest_averaging_interval: 0
grad_clip: 5.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 1
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
use_matplotlib: true
use_tensorboard: true
create_graph_in_tensorboard: false
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
use_lora: false
save_lora_only: true
lora_conf: {}
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: null
batch_size: 16
valid_batch_size: null
batch_bins: 1000000
valid_batch_bins: null
train_shape_file:
- exp/asr_stats_raw_es_bpe64_sp/train/speech_shape
- exp/asr_stats_raw_es_bpe64_sp/train/text_shape.bpe
valid_shape_file:
- exp/asr_stats_raw_es_bpe64_sp/valid/speech_shape
- exp/asr_stats_raw_es_bpe64_sp/valid/text_shape.bpe
batch_type: folded
valid_batch_type: null
fold_length:
- 80000
- 150
sort_in_batch: descending
shuffle_within_batch: false
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
chunk_excluded_key_prefixes: []
chunk_default_fs: null
train_data_path_and_name_and_type:
-   - dump/raw/train_nodev_sp/wav.scp
    - speech
    - sound
-   - dump/raw/train_nodev_sp/text
    - text
    - text
valid_data_path_and_name_and_type:
-   - dump/raw/train_dev/wav.scp
    - speech
    - sound
-   - dump/raw/train_dev/text
    - text
    - text
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
allow_multi_rates: false
valid_max_cache_size: null
exclude_weight_decay: false
exclude_weight_decay_conf: {}
optim: adam
optim_conf:
    lr: 0.001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 2500
token_list:
- <blank>
- <unk>
- ▁
- a
- o
- i
- s
- r
- e
- n
- c
- u
- l
- m
- b
- g
- t
- ▁de
- ▁a
- en
- do
- er
- ▁p
- ra
- ta
- te
- h
- ▁que
- p
- ▁la
- ▁el
- ▁es
- to
- d
- da
- es
- ▁no
- os
- y
- ▁y
- ▁ma
- ▁un
- ▁se
- ▁en
- la
- f
- z
- ñ
- '0'
- '4'
- '3'
- w
- '6'
- '8'
- '9'
- '2'
- '1'
- x
- j
- q
- '5'
- '7'
- v
- <sos/eos>
init: xavier_uniform
input_size: null
ctc_conf:
    dropout_rate: 0.0
    ctc_type: builtin
    reduce: true
    ignore_nan_grad: null
    zero_infinity: true
    brctc_risk_strategy: exp
    brctc_group_strategy: end
    brctc_risk_factor: 0.0
joint_net_conf: null
use_preprocessor: true
use_lang_prompt: false
use_nlp_prompt: false
token_type: bpe
bpemodel: data/es_token_list/bpe_unigram64/bpe.model
non_linguistic_symbols: null
cleaner: null
g2p: null
speech_volume_normalize: null
rir_scp: null
rir_apply_prob: 1.0
noise_scp: null
noise_apply_prob: 1.0
noise_db_range: '13_15'
short_noise_thres: 0.5
aux_ctc_tasks: []
frontend: default
frontend_conf:
    fs: 16k
specaug: null
specaug_conf: {}
normalize: global_mvn
normalize_conf:
    stats_file: exp/asr_stats_raw_es_bpe64_sp/train/feats_stats.npz
model: espnet
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1
    length_normalized_loss: false
preencoder: null
preencoder_conf: {}
encoder: transformer
encoder_conf:
    output_size: 256
    attention_heads: 4
    linear_units: 2048
    num_blocks: 12
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d
    normalize_before: true
postencoder: null
postencoder_conf: {}
decoder: transformer
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0
preprocessor: default
preprocessor_conf: {}
required:
- output_dir
- token_list
version: '202402'
distributed: false
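
The block above is the resolved training configuration that ESPnet2 writes alongside the checkpoints. Below is a minimal Python sketch for pulling out the main hyperparameters and previewing the learning-rate schedule; it assumes the dump is saved as config.yaml under output_dir and that warmuplr follows the usual Noam-style warmup rule, neither of which is stated in the dump itself.

```python
# Sketch only: inspect the dumped config with PyYAML and preview the warmup schedule.
import yaml

# Assumed location of the dump (output_dir/config.yaml).
with open("exp/asr_train_asr_transformer_raw_es_bpe64_sp/config.yaml") as f:
    cfg = yaml.safe_load(f)

print("encoder:", cfg["encoder"], cfg["encoder_conf"])
print("decoder:", cfg["decoder"], cfg["decoder_conf"])
print("ctc_weight:", cfg["model_conf"]["ctc_weight"])

# Assumed warmuplr rule (Noam-style): lr peaks at optim_conf.lr when step == warmup_steps.
base_lr = cfg["optim_conf"]["lr"]               # 0.001
warmup = cfg["scheduler_conf"]["warmup_steps"]  # 2500
for step in (1, 1250, 2500, 10000):
    lr = base_lr * warmup**0.5 * min(step**-0.5, step * warmup**-1.5)
    print(f"step {step:>6d}: lr = {lr:.6f}")
```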