{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 看看模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "主要的训练代码在`fairseq/fairseq_cli/train.py`的`main`函数中，仿照这个函数我们将模型构建起来，看看里头到底是什么东西"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/ssk/MoEResearch/MoEc_model/notebooks\n"
     ]
    }
   ],
   "source": [
    "!pwd"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "pwd显示工作目录是在notebooks下，所以使用`sys.path.append`将fairseq和unilm加进来，unilm是作者引入的另外一个库，用于定义MoE模型，不过遵循的大框架还是FairSeq，在unilm中作者也进行了模型注册。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append(\"../fairseq\")\n",
    "sys.path.append(\"../\")\n",
    "import fairseq\n",
    "import unilm # import这个玩意，把作者自己定义的模型也注册了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 只能执行一次，否则报错\n",
    "import torch.distributed as dist\n",
    "dist.init_process_group(backend='nccl', init_method='tcp://localhost:23456', rank=0, world_size=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from fairseq import (\n",
    "    checkpoint_utils,\n",
    "    options,\n",
    "    quantization_utils,\n",
    "    tasks,\n",
    "    utils,\n",
    ")\n",
    "from fairseq.dataclass.utils import convert_namespace_to_omegaconf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<module 'fairseq.models' from '/home/ubuntu/ssk/MoEResearch/MoEc_model/notebooks/../fairseq/fairseq/models/__init__.py'>"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fairseq.models"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "仿照train.py中的main函数先定义argument parser."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "arguments=[\n",
    "        \"../fairseq/data-bin/wmt17_en_de\",\n",
    "        \"--arch\", \"gdmoe_wmt_en_de\",\n",
    "        \"--encoder-moe-layers\", \"3\" ,\n",
    "        \"--decoder-moe-layers\", \"3\" ,\n",
    "        \"--moe-top1-expert\" ,\n",
    "        \"--moe-sublayers\" ,\"3\" ,\n",
    "        \"--moe-expert-count\", \"64\" ,\n",
    "        \"--moe-gating-use-fp32\" ,\n",
    "        \"--tmoe-routing-dim-reduction\",\n",
    "        \"--tmoe-routing-dim\" ,\"32\" ,\n",
    "        \"--tmoe-routing-hard-cosine\" ,\n",
    "        \"--moe-activation-dropout\" ,\"0.0\" ,\n",
    "        \"--moe-dropout\" ,\"0.0\",\n",
    "        ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "parser = options.get_training_parser()\n",
    "args = options.parse_args_and_arch(parser,input_args=arguments)\n",
    "\n",
    "cfg = convert_namespace_to_omegaconf(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'_name': None, 'common': {'_name': None, 'no_progress_bar': False, 'log_interval': 100, 'log_format': None, 'log_file': None, 'tensorboard_logdir': None, 'wandb_project': None, 'azureml_logging': False, 'seed': 1, 'cpu': False, 'tpu': False, 'bf16': False, 'memory_efficient_bf16': False, 'fp16': False, 'memory_efficient_fp16': False, 'fp16_no_flatten_grads': False, 'fp16_init_scale': 128, 'fp16_scale_window': None, 'fp16_scale_tolerance': 0.0, 'on_cpu_convert_precision': False, 'min_loss_scale': 0.0001, 'threshold_loss_scale': None, 'amp': False, 'amp_batch_retries': 2, 'amp_init_scale': 128, 'amp_scale_window': None, 'user_dir': None, 'empty_cache_freq': 0, 'all_gather_list_size': 16384, 'model_parallel_size': 1, 'quantization_config_path': None, 'profile': False, 'reset_logging': False, 'suppress_crashes': False, 'use_plasma_view': False, 'plasma_path': '/tmp/plasma'}, 'common_eval': {'_name': None, 'path': None, 'post_process': None, 'quiet': False, 'model_overrides': '{}', 'results_path': None}, 'distributed_training': {'_name': None, 'distributed_world_size': 2, 'distributed_num_procs': 2, 'distributed_rank': 0, 'distributed_backend': 'nccl', 'distributed_init_method': None, 'distributed_port': -1, 'device_id': 0, 'distributed_no_spawn': False, 'ddp_backend': 'pytorch_ddp', 'ddp_comm_hook': 'none', 'bucket_cap_mb': 25, 'fix_batches_to_gpus': False, 'find_unused_parameters': False, 'gradient_as_bucket_view': False, 'fast_stat_sync': False, 'heartbeat_timeout': -1, 'broadcast_buffers': False, 'slowmo_momentum': None, 'slowmo_algorithm': 'LocalSGD', 'localsgd_frequency': 3, 'nprocs_per_node': 2, 'pipeline_model_parallel': False, 'pipeline_balance': None, 'pipeline_devices': None, 'pipeline_chunks': 0, 'pipeline_encoder_balance': None, 'pipeline_encoder_devices': None, 'pipeline_decoder_balance': None, 'pipeline_decoder_devices': None, 'pipeline_checkpoint': 'never', 'zero_sharding': 'none', 'fp16': False, 'memory_efficient_fp16': False, 'tpu': False, 
'no_reshard_after_forward': False, 'fp32_reduce_scatter': False, 'cpu_offload': False, 'use_sharded_state': False}, 'dataset': {'_name': None, 'num_workers': 1, 'skip_invalid_size_inputs_valid_test': False, 'max_tokens': None, 'batch_size': None, 'required_batch_size_multiple': 8, 'required_seq_len_multiple': 1, 'dataset_impl': None, 'data_buffer_size': 10, 'train_subset': 'train', 'valid_subset': 'valid', 'combine_valid_subsets': None, 'ignore_unused_valid_subsets': False, 'validate_interval': 1, 'validate_interval_updates': 0, 'validate_after_updates': 0, 'fixed_validation_seed': None, 'disable_validation': False, 'max_tokens_valid': None, 'batch_size_valid': None, 'max_valid_steps': None, 'curriculum': 0, 'gen_subset': 'test', 'num_shards': 1, 'shard_id': 0}, 'optimization': {'_name': None, 'max_epoch': 0, 'max_update': 0, 'stop_time_hours': 0.0, 'clip_norm': 0.0, 'sentence_avg': False, 'update_freq': [1], 'lr': [0.25], 'stop_min_lr': -1.0, 'use_bmuf': False}, 'checkpoint': {'_name': None, 'save_dir': 'checkpoints', 'restore_file': 'checkpoint_last.pt', 'finetune_from_model': None, 'reset_dataloader': False, 'reset_lr_scheduler': False, 'reset_meters': False, 'reset_optimizer': False, 'optimizer_overrides': '{}', 'save_interval': 1, 'save_interval_updates': 0, 'keep_interval_updates': -1, 'keep_interval_updates_pattern': -1, 'keep_last_epochs': -1, 'keep_best_checkpoints': -1, 'no_save': False, 'no_epoch_checkpoints': False, 'no_last_checkpoints': False, 'no_save_optimizer_state': False, 'best_checkpoint_metric': 'loss', 'maximize_best_checkpoint_metric': False, 'patience': -1, 'checkpoint_suffix': '', 'checkpoint_shard_count': 1, 'load_checkpoint_on_all_dp_ranks': False, 'write_checkpoints_asynchronously': False, 'model_parallel_size': 1}, 'bmuf': {'_name': None, 'block_lr': 1.0, 'block_momentum': 0.875, 'global_sync_iter': 50, 'warmup_iterations': 500, 'use_nbm': False, 'average_sync': False, 'distributed_world_size': 2}, 'generation': {'_name': None, 'beam': 
5, 'nbest': 1, 'max_len_a': 0.0, 'max_len_b': 200, 'min_len': 1, 'match_source_len': False, 'unnormalized': False, 'no_early_stop': False, 'no_beamable_mm': False, 'lenpen': 1.0, 'unkpen': 0.0, 'replace_unk': None, 'sacrebleu': False, 'score_reference': False, 'prefix_size': 0, 'no_repeat_ngram_size': 0, 'sampling': False, 'sampling_topk': -1, 'sampling_topp': -1.0, 'constraints': None, 'temperature': 1.0, 'diverse_beam_groups': -1, 'diverse_beam_strength': 0.5, 'diversity_rate': -1.0, 'print_alignment': None, 'print_step': False, 'lm_path': None, 'lm_weight': 0.0, 'iter_decode_eos_penalty': 0.0, 'iter_decode_max_iter': 10, 'iter_decode_force_max_iter': False, 'iter_decode_with_beam': 1, 'iter_decode_with_external_reranker': False, 'retain_iter_history': False, 'retain_dropout': False, 'retain_dropout_modules': None, 'decoding_format': None, 'no_seed_provided': False}, 'eval_lm': {'_name': None, 'output_word_probs': False, 'output_word_stats': False, 'context_window': 0, 'softmax_batch': 9223372036854775807}, 'interactive': {'_name': None, 'buffer_size': 0, 'input': '-'}, 'model': Namespace(no_progress_bar=False, log_interval=100, log_format=None, log_file=None, tensorboard_logdir=None, wandb_project=None, azureml_logging=False, seed=1, cpu=False, tpu=False, bf16=False, memory_efficient_bf16=False, fp16=False, memory_efficient_fp16=False, fp16_no_flatten_grads=False, fp16_init_scale=128, fp16_scale_window=None, fp16_scale_tolerance=0.0, on_cpu_convert_precision=False, min_loss_scale=0.0001, threshold_loss_scale=None, amp=False, amp_batch_retries=2, amp_init_scale=128, amp_scale_window=None, user_dir=None, empty_cache_freq=0, all_gather_list_size=16384, model_parallel_size=1, quantization_config_path=None, profile=False, reset_logging=False, suppress_crashes=False, use_plasma_view=False, plasma_path='/tmp/plasma', criterion='cross_entropy', tokenizer=None, bpe=None, optimizer=None, lr_scheduler='fixed', simul_type=None, scoring='bleu', task='translation', 
num_workers=1, skip_invalid_size_inputs_valid_test=False, max_tokens=None, batch_size=None, required_batch_size_multiple=8, required_seq_len_multiple=1, dataset_impl=None, data_buffer_size=10, train_subset='train', valid_subset='valid', combine_valid_subsets=None, ignore_unused_valid_subsets=False, validate_interval=1, validate_interval_updates=0, validate_after_updates=0, fixed_validation_seed=None, disable_validation=False, max_tokens_valid=None, batch_size_valid=None, max_valid_steps=None, curriculum=0, gen_subset='test', num_shards=1, shard_id=0, distributed_world_size=2, distributed_num_procs=2, distributed_rank=0, distributed_backend='nccl', distributed_init_method=None, distributed_port=-1, device_id=0, distributed_no_spawn=False, ddp_backend='pytorch_ddp', ddp_comm_hook='none', bucket_cap_mb=25, fix_batches_to_gpus=False, find_unused_parameters=False, gradient_as_bucket_view=False, fast_stat_sync=False, heartbeat_timeout=-1, broadcast_buffers=False, slowmo_momentum=None, slowmo_algorithm='LocalSGD', localsgd_frequency=3, nprocs_per_node=2, pipeline_model_parallel=False, pipeline_balance=None, pipeline_devices=None, pipeline_chunks=0, pipeline_encoder_balance=None, pipeline_encoder_devices=None, pipeline_decoder_balance=None, pipeline_decoder_devices=None, pipeline_checkpoint='never', zero_sharding='none', no_reshard_after_forward=False, fp32_reduce_scatter=False, cpu_offload=False, use_sharded_state=False, arch='gdmoe_wmt_en_de', max_epoch=0, max_update=0, stop_time_hours=0, clip_norm=0.0, sentence_avg=False, update_freq=[1], lr=[0.25], stop_min_lr=-1.0, use_bmuf=False, save_dir='checkpoints', restore_file='checkpoint_last.pt', finetune_from_model=None, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, optimizer_overrides='{}', save_interval=1, save_interval_updates=0, keep_interval_updates=-1, keep_interval_updates_pattern=-1, keep_last_epochs=-1, keep_best_checkpoints=-1, no_save=False, 
no_epoch_checkpoints=False, no_last_checkpoints=False, no_save_optimizer_state=False, best_checkpoint_metric='loss', maximize_best_checkpoint_metric=False, patience=-1, checkpoint_suffix='', checkpoint_shard_count=1, load_checkpoint_on_all_dp_ranks=False, write_checkpoints_asynchronously=False, store_ema=False, ema_decay=0.9999, ema_start_update=0, ema_seed_model=None, ema_update_freq=1, ema_fp32=False, encoder_moe_freq=0, decoder_moe_freq=0, encoder_moe_layers='3', decoder_moe_layers='3', activation_fn='relu', dropout=0.3, attention_dropout=0.1, activation_dropout=0.0, encoder_embed_dim=1024, encoder_output_dim=512, encoder_input_dim=512, encoder_ffn_embed_dim=4096, encoder_layers=6, encoder_attention_heads=16, encoder_normalize_before=False, no_encoder_final_norm=False, no_token_positional_embeddings=False, share_encoder_input_output_embed=False, encoder_learned_pos=False, layernorm_embedding=False, no_scale_embedding=False, checkpoint_activations=False, offload_activations=False, encoder_layerdrop=0.0, encoder_layers_to_keep=None, min_params_to_wrap=100000000, max_target_positions=1024, pooler_activation_fn='relu', pooler_dropout=0.0, task_moe=False, num_experts=2, rel_pos_buckets=0, max_rel_pos=0, rescale_init=False, ffn_layernorm=False, sharded_save=False, alternate_decoder_ffn_embed_dim=0, moe_freq=0, moe_expert_count=64, moe_gating_use_fp32=True, moe_second_expert_policy='sampling', moe_normalize_gate_prob_before_dropping=False, moe_expert_ffn_dim=None, moe_top1_expert=True, moe_eval_capacity_token_fraction=0.25, moe_normalize_expert_grad='world_size', use_moe_pad_mask=False, record_a2a_perf_stats=False, dummy_a2a=False, moe_batch_prioritized_routing=False, use_stable_embedding=False, transformer_moe_layers='', fine_tune_stage=False, fine_tune_stage_restore_model_path='', tmoe_routing_dim_reduction=True, tmoe_routing_dim=32, tmoe_routing_hard_cosine=True, insert_transformer_moe_layers='', moe_sublayers=3, moe_dropout=0.0, moe_activation_dropout=0.0, 
capacity_factor=2, token_shuffle=False, layer_norm_after_moe=False, exp_level_drop=0.1, dropout_interval=500, var_coef=0.0, group_num=1, coef_type=1, data='../fairseq/data-bin/wmt17_en_de', source_lang=None, target_lang=None, load_alignments=False, left_pad_source=True, left_pad_target=False, max_source_positions=1024, upsample_primary=-1, truncate_source=False, num_batch_buckets=0, eval_bleu=False, eval_bleu_args='{}', eval_bleu_detok='space', eval_bleu_detok_args='{}', eval_tokenized_bleu=False, eval_bleu_remove_bpe=None, eval_bleu_print_samples=False, force_anneal=None, lr_shrink=0.1, warmup_updates=0, pad=1, eos=2, unk=3, no_seed_provided=False, decoder_embed_dim=1024, decoder_ffn_embed_dim=4096, decoder_attention_heads=16, encoder_embed_path=None, decoder_embed_path=None, decoder_layers=6, decoder_normalize_before=False, decoder_learned_pos=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, share_decoder_input_output_embed=False, share_all_embeddings=False, adaptive_input=False, no_cross_attention=False, cross_self_attention=False, decoder_output_dim=1024, decoder_input_dim=1024, tie_adaptive_weights=False, decoder_layers_to_keep=None, decoder_layerdrop=0, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, _name='gdmoe_wmt_en_de'), 'task': {'_name': 'translation', 'data': '../fairseq/data-bin/wmt17_en_de', 'source_lang': None, 'target_lang': None, 'load_alignments': False, 'left_pad_source': True, 'left_pad_target': False, 'max_source_positions': 1024, 'max_target_positions': 1024, 'upsample_primary': -1, 'truncate_source': False, 'num_batch_buckets': 0, 'train_subset': 'train', 'dataset_impl': None, 'required_seq_len_multiple': 1, 'eval_bleu': False, 'eval_bleu_args': '{}', 'eval_bleu_detok': 'space', 'eval_bleu_detok_args': '{}', 'eval_tokenized_bleu': False, 'eval_bleu_remove_bpe': None, 'eval_bleu_print_samples': False}, 'criterion': {'_name': 'cross_entropy', 'sentence_avg': False}, 'optimizer': None, 'lr_scheduler': 
{'_name': 'fixed', 'force_anneal': None, 'lr_shrink': 0.1, 'warmup_updates': 0, 'lr': [0.25]}, 'scoring': {'_name': 'bleu', 'pad': 1, 'eos': 2, 'unk': 3}, 'bpe': None, 'tokenizer': None, 'ema': {'_name': None, 'store_ema': False, 'ema_decay': 0.9999, 'ema_start_update': 0, 'ema_seed_model': None, 'ema_update_freq': 1, 'ema_fp32': False}, 'simul_type': None}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cfg"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们首先看看注册了哪些模型？\n",
    "\n",
    "可以看到基本的模型都有，Transformer，huggingface_gpt2等等"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'transformer': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_align': fairseq.models.transformer_align.TransformerAlignModel,\n",
       " 'lstm': fairseq.models.lstm.LSTMModel,\n",
       " 'lstm_lm': fairseq.models.lstm_lm.LSTMLanguageModel,\n",
       " 'tacotron_2': fairseq.models.text_to_speech.tacotron2.Tacotron2Model,\n",
       " 'tts_transformer': fairseq.models.text_to_speech.tts_transformer.TTSTransformerModel,\n",
       " 'fastspeech2': fairseq.models.text_to_speech.fastspeech2.FastSpeech2Model,\n",
       " 'transformer_from_pretrained_xlm': fairseq.models.transformer_from_pretrained_xlm.TransformerFromPretrainedXLMModel,\n",
       " 'nonautoregressive_transformer': fairseq.models.nat.nonautoregressive_transformer.NATransformerModel,\n",
       " 'nacrf_transformer': fairseq.models.nat.nat_crf_transformer.NACRFTransformerModel,\n",
       " 'iterative_nonautoregressive_transformer': fairseq.models.nat.iterative_nonautoregressive_transformer.IterNATransformerModel,\n",
       " 'cmlm_transformer': fairseq.models.nat.cmlm_transformer.CMLMNATransformerModel,\n",
       " 'levenshtein_transformer': fairseq.models.nat.levenshtein_transformer.LevenshteinTransformerModel,\n",
       " 'insertion_transformer': fairseq.models.nat.insertion_transformer.InsertionTransformerModel,\n",
       " 'masked_lm': fairseq.models.masked_lm.MaskedLMModel,\n",
       " 'fconv_self_att': fairseq.models.fconv_self_att.FConvModelSelfAtt,\n",
       " 'roberta': fairseq.models.roberta.model.RobertaModel,\n",
       " 'roberta_enc_dec': fairseq.models.roberta.enc_dec.RobertaEncDecModel,\n",
       " 'camembert': fairseq.models.roberta.model_camembert.CamembertModel,\n",
       " 'gottbert': fairseq.models.roberta.model_gottbert.GottbertModel,\n",
       " 'xlmr': fairseq.models.roberta.model_xlmr.XLMRModel,\n",
       " 's2t_berard': fairseq.models.speech_to_text.berard.BerardModel,\n",
       " 'convtransformer': fairseq.models.speech_to_text.convtransformer.ConvTransformerModel,\n",
       " 's2t_transformer': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 'wav2vec': fairseq.models.wav2vec.wav2vec.Wav2VecModel,\n",
       " 'wav2vec2': fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model,\n",
       " 'wav2vec_ctc': fairseq.models.wav2vec.wav2vec2_asr.Wav2VecCtc,\n",
       " 'wav2vec_seq2seq': fairseq.models.wav2vec.wav2vec2_asr.Wav2Vec2Seq2SeqModel,\n",
       " 'xm_transformer': fairseq.models.speech_to_text.xm_transformer.XMTransformerModel,\n",
       " 'fconv': fairseq.models.fconv.FConvModel,\n",
       " 'fconv_lm': fairseq.models.fconv_lm.FConvLanguageModel,\n",
       " 'multilingual_transformer': fairseq.models.multilingual_transformer.MultilingualTransformerModel,\n",
       " 'bart': fairseq.models.bart.model.BARTModel,\n",
       " 'transformer_lm': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'lightconv': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_lm': fairseq.models.lightconv_lm.LightConvLanguageModel,\n",
       " 'hubert': fairseq.models.hubert.hubert.HubertModel,\n",
       " 'hubert_ctc': fairseq.models.hubert.hubert_asr.HubertCtc,\n",
       " 'hf_gpt2': fairseq.models.huggingface.hf_gpt2.HuggingFaceGPT2LanguageModel,\n",
       " 'convtransformer_simul_trans': examples.simultaneous_translation.models.convtransformer_simul_trans.SimulConvTransformerModel,\n",
       " 'convtransformer_augmented_memory': fairseq.models.speech_to_text.modules.augmented_memory_attention.augmented_memory.<locals>.StreamSeq2SeqModel,\n",
       " 'convtransformer_emformer': examples.simultaneous_translation.models.convtransformer_simul_trans.ConvtransformerEmformer,\n",
       " 'transformer_unidirectional': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerUnidirectionalModel,\n",
       " 'transformer_monotonic': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerModelSimulTrans,\n",
       " 'dummy_model': fairseq.benchmark.dummy_model.DummyModel,\n",
       " 'model_parallel_transformer': fairseq.model_parallel.models.transformer.ModelParallelTransformerModel,\n",
       " 'model_parallel_transformer_lm': fairseq.model_parallel.models.transformer_lm.ModelParallelTransformerLanguageModel,\n",
       " 'model_parallel_roberta': fairseq.model_parallel.models.roberta.model.ModelParallelRobertaModel,\n",
       " 'pipeline_parallel_transformer': fairseq.model_parallel.models.pipeline_parallel_transformer.model.PipelineParallelTransformerModel,\n",
       " 'unilm': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_gshard': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_tmoe': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'tmoe': unilm.models.moe_legacy.TMoEModel,\n",
       " 'unilm_tmoe_group': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_gdmoe': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'tmoe_group': unilm.models.moe_legacy_group.TMoEModel,\n",
       " 'gdmoe': unilm.models.gdmoe_legacy.GDMoEModel}"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fairseq.models.MODEL_REGISTRY"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "看看注册了哪些架构？\n",
    "\n",
    "一般来说不同的架构适用于不同的任务，或者有不同的参数量，比如\n",
    "- transformer_iwslt_de_en/transformer_wmt_en_de： 分别是de->en和en->de translation，不同任务\n",
    "- bert_base/bert_large: 不同参数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'transformer_tiny': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_iwslt_de_en': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_wmt_en_de': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_vaswani_wmt_en_de_big': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_vaswani_wmt_en_fr_big': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_wmt_en_de_big': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_wmt_en_de_big_t2t': fairseq.models.transformer.transformer_legacy.TransformerModel,\n",
       " 'transformer_align': fairseq.models.transformer_align.TransformerAlignModel,\n",
       " 'transformer_wmt_en_de_big_align': fairseq.models.transformer_align.TransformerAlignModel,\n",
       " 'lstm': fairseq.models.lstm.LSTMModel,\n",
       " 'lstm_wiseman_iwslt_de_en': fairseq.models.lstm.LSTMModel,\n",
       " 'lstm_luong_wmt_en_de': fairseq.models.lstm.LSTMModel,\n",
       " 'lstm_lm': fairseq.models.lstm_lm.LSTMLanguageModel,\n",
       " 'tacotron_2': fairseq.models.text_to_speech.tacotron2.Tacotron2Model,\n",
       " 'tts_transformer': fairseq.models.text_to_speech.tts_transformer.TTSTransformerModel,\n",
       " 'fastspeech2': fairseq.models.text_to_speech.fastspeech2.FastSpeech2Model,\n",
       " 'transformer_from_pretrained_xlm': fairseq.models.transformer_from_pretrained_xlm.TransformerFromPretrainedXLMModel,\n",
       " 'nonautoregressive_transformer': fairseq.models.nat.nonautoregressive_transformer.NATransformerModel,\n",
       " 'nonautoregressive_transformer_wmt_en_de': fairseq.models.nat.nonautoregressive_transformer.NATransformerModel,\n",
       " 'nacrf_transformer': fairseq.models.nat.nat_crf_transformer.NACRFTransformerModel,\n",
       " 'iterative_nonautoregressive_transformer': fairseq.models.nat.iterative_nonautoregressive_transformer.IterNATransformerModel,\n",
       " 'iterative_nonautoregressive_transformer_wmt_en_de': fairseq.models.nat.iterative_nonautoregressive_transformer.IterNATransformerModel,\n",
       " 'cmlm_transformer': fairseq.models.nat.cmlm_transformer.CMLMNATransformerModel,\n",
       " 'cmlm_transformer_wmt_en_de': fairseq.models.nat.cmlm_transformer.CMLMNATransformerModel,\n",
       " 'levenshtein_transformer': fairseq.models.nat.levenshtein_transformer.LevenshteinTransformerModel,\n",
       " 'levenshtein_transformer_wmt_en_de': fairseq.models.nat.levenshtein_transformer.LevenshteinTransformerModel,\n",
       " 'levenshtein_transformer_vaswani_wmt_en_de_big': fairseq.models.nat.levenshtein_transformer.LevenshteinTransformerModel,\n",
       " 'levenshtein_transformer_wmt_en_de_big': fairseq.models.nat.levenshtein_transformer.LevenshteinTransformerModel,\n",
       " 'insertion_transformer': fairseq.models.nat.insertion_transformer.InsertionTransformerModel,\n",
       " 'masked_lm': fairseq.models.masked_lm.MaskedLMModel,\n",
       " 'bert_base': fairseq.models.masked_lm.MaskedLMModel,\n",
       " 'bert_large': fairseq.models.masked_lm.MaskedLMModel,\n",
       " 'xlm_base': fairseq.models.masked_lm.MaskedLMModel,\n",
       " 'fconv_self_att': fairseq.models.fconv_self_att.FConvModelSelfAtt,\n",
       " 'fconv_self_att_wp': fairseq.models.fconv_self_att.FConvModelSelfAtt,\n",
       " 'roberta': fairseq.models.roberta.model.RobertaModel,\n",
       " 'roberta_prenorm': fairseq.models.roberta.model.RobertaModel,\n",
       " 'roberta_base': fairseq.models.roberta.model.RobertaModel,\n",
       " 'roberta_large': fairseq.models.roberta.model.RobertaModel,\n",
       " 'xlm': fairseq.models.roberta.model.RobertaModel,\n",
       " 'roberta_enc_dec': fairseq.models.roberta.enc_dec.RobertaEncDecModel,\n",
       " 's2t_berard': fairseq.models.speech_to_text.berard.BerardModel,\n",
       " 's2t_berard_256_3_3': fairseq.models.speech_to_text.berard.BerardModel,\n",
       " 's2t_berard_512_3_2': fairseq.models.speech_to_text.berard.BerardModel,\n",
       " 's2t_berard_512_5_3': fairseq.models.speech_to_text.berard.BerardModel,\n",
       " 'convtransformer': fairseq.models.speech_to_text.convtransformer.ConvTransformerModel,\n",
       " 'convtransformer_espnet': fairseq.models.speech_to_text.convtransformer.ConvTransformerModel,\n",
       " 's2t_transformer': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_s': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_xs': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_sp': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_m': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_mp': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_l': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 's2t_transformer_lp': fairseq.models.speech_to_text.s2t_transformer.S2TTransformerModel,\n",
       " 'wav2vec': fairseq.models.wav2vec.wav2vec.Wav2VecModel,\n",
       " 'wav2vec2': fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model,\n",
       " 'wav2vec_ctc': fairseq.models.wav2vec.wav2vec2_asr.Wav2VecCtc,\n",
       " 'wav2vec_seq2seq': fairseq.models.wav2vec.wav2vec2_asr.Wav2Vec2Seq2SeqModel,\n",
       " 'xm_transformer': fairseq.models.speech_to_text.xm_transformer.XMTransformerModel,\n",
       " 'fconv': fairseq.models.fconv.FConvModel,\n",
       " 'fconv_iwslt_de_en': fairseq.models.fconv.FConvModel,\n",
       " 'fconv_wmt_en_ro': fairseq.models.fconv.FConvModel,\n",
       " 'fconv_wmt_en_de': fairseq.models.fconv.FConvModel,\n",
       " 'fconv_wmt_en_fr': fairseq.models.fconv.FConvModel,\n",
       " 'fconv_lm': fairseq.models.fconv_lm.FConvLanguageModel,\n",
       " 'fconv_lm_dauphin_wikitext103': fairseq.models.fconv_lm.FConvLanguageModel,\n",
       " 'fconv_lm_dauphin_gbw': fairseq.models.fconv_lm.FConvLanguageModel,\n",
       " 'multilingual_transformer': fairseq.models.multilingual_transformer.MultilingualTransformerModel,\n",
       " 'multilingual_transformer_iwslt_de_en': fairseq.models.multilingual_transformer.MultilingualTransformerModel,\n",
       " 'bart_large': fairseq.models.bart.model.BARTModel,\n",
       " 'bart_base': fairseq.models.bart.model.BARTModel,\n",
       " 'mbart_large': fairseq.models.bart.model.BARTModel,\n",
       " 'mbart_base': fairseq.models.bart.model.BARTModel,\n",
       " 'mbart_base_wmt20': fairseq.models.bart.model.BARTModel,\n",
       " 'transformer_lm': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_big': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_baevski_wiki103': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_wiki103': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_baevski_gbw': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gbw': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt2_small': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt2_tiny': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt2_medium': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt2_big': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_small': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_medium': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_large': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_xl': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_2_7': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_6_7': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_13': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'transformer_lm_gpt3_175': fairseq.models.transformer_lm.TransformerLanguageModel,\n",
       " 'lightconv': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_iwslt_de_en': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_wmt_en_de': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_wmt_en_de_big': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_wmt_en_fr_big': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_wmt_zh_en_big': fairseq.models.lightconv.LightConvModel,\n",
       " 'lightconv_lm': fairseq.models.lightconv_lm.LightConvLanguageModel,\n",
       " 'lightconv_lm_gbw': fairseq.models.lightconv_lm.LightConvLanguageModel,\n",
       " 'hubert': fairseq.models.hubert.hubert.HubertModel,\n",
       " 'hubert_ctc': fairseq.models.hubert.hubert_asr.HubertCtc,\n",
       " 'hf_gpt2': fairseq.models.huggingface.hf_gpt2.HuggingFaceGPT2LanguageModel,\n",
       " 'hf_gpt2_medium': fairseq.models.huggingface.hf_gpt2.HuggingFaceGPT2LanguageModel,\n",
       " 'hf_gpt2_large': fairseq.models.huggingface.hf_gpt2.HuggingFaceGPT2LanguageModel,\n",
       " 'hf_gpt2_xl': fairseq.models.huggingface.hf_gpt2.HuggingFaceGPT2LanguageModel,\n",
       " 'convtransformer_simul_trans_espnet': examples.simultaneous_translation.models.convtransformer_simul_trans.SimulConvTransformerModel,\n",
       " 'convtransformer_augmented_memory': fairseq.models.speech_to_text.modules.augmented_memory_attention.augmented_memory.<locals>.StreamSeq2SeqModel,\n",
       " 'convtransformer_emformer': examples.simultaneous_translation.models.convtransformer_simul_trans.ConvtransformerEmformer,\n",
       " 'transformer_monotonic': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerModelSimulTrans,\n",
       " 'transformer_monotonic_iwslt_de_en': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerModelSimulTrans,\n",
       " 'transformer_monotonic_vaswani_wmt_en_de_big': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerModelSimulTrans,\n",
       " 'transformer_monotonic_vaswani_wmt_en_fr_big': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerModelSimulTrans,\n",
       " 'transformer_unidirectional_iwslt_de_en': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerUnidirectionalModel,\n",
       " 'transformer_monotonic_tiny': examples.simultaneous_translation.models.transformer_monotonic_attention.TransformerModelSimulTrans,\n",
       " 'dummy_model': fairseq.benchmark.dummy_model.DummyModel,\n",
       " 'transformer_lm_megatron': fairseq.model_parallel.models.transformer_lm.ModelParallelTransformerLanguageModel,\n",
       " 'transformer_lm_megatron_11b': fairseq.model_parallel.models.transformer_lm.ModelParallelTransformerLanguageModel,\n",
       " 'model_parallel_roberta': fairseq.model_parallel.models.roberta.model.ModelParallelRobertaModel,\n",
       " 'model_parallel_roberta_v1': fairseq.model_parallel.models.roberta.model.ModelParallelRobertaModel,\n",
       " 'model_parallel_roberta_postnorm': fairseq.model_parallel.models.roberta.model.ModelParallelRobertaModel,\n",
       " 'model_parallel_roberta_base': fairseq.model_parallel.models.roberta.model.ModelParallelRobertaModel,\n",
       " 'model_parallel_roberta_large': fairseq.model_parallel.models.roberta.model.ModelParallelRobertaModel,\n",
       " 'transformer_iwslt_de_en_pipeline_parallel': fairseq.model_parallel.models.pipeline_parallel_transformer.model.PipelineParallelTransformerModel,\n",
       " 'transformer_wmt_en_de_big_pipeline_parallel': fairseq.model_parallel.models.pipeline_parallel_transformer.model.PipelineParallelTransformerModel,\n",
       " 'unilm': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_base': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_tiny': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_xlarge_24L': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_xlarge_24L_post': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_xlarge_48L': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_xlarge_36L': unilm.models.unilm.UniLMModel,\n",
       " 'unilm_gshard': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_gshard_base': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_gshard_xlarge_24L': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_gshard_xlarge_24L_post': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_gshard_xlarge_48L': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_gshard_xlarge_36L': unilm.models.unilm_gshard.UniLMGshardModel,\n",
       " 'unilm_tmoe': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'unilm_tmoe_base': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'unilm_tmoe_xlarge_24L': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'unilm_tmoe_xlarge_24L_post': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'unilm_tmoe_xlarge_48L': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'unilm_tmoe_xlarge_36L': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'unilm_tmoe_xsmall': unilm.models.unilm_tmoe.UniLMTMoEModel,\n",
       " 'tmoe': unilm.models.moe_legacy.TMoEModel,\n",
       " 'tmoe_tiny': unilm.models.moe_legacy.TMoEModel,\n",
       " 'tmoe_iwslt_de_en': unilm.models.moe_legacy.TMoEModel,\n",
       " 'tmoe_wmt_en_de': unilm.models.moe_legacy.TMoEModel,\n",
       " 'unilm_tmoe_group': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_tmoe_group_base': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_tmoe_group_xlarge_24L': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_tmoe_group_xlarge_24L_post': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_tmoe_group_xlarge_48L': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_tmoe_group_xlarge_36L': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_tmoe_group_xsmall': unilm.models.unilm_tmoe_group.UniLMTMoEModel,\n",
       " 'unilm_gdmoe': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'unilm_gdmoe_base': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'unilm_gdmoe_xlarge_24L': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'unilm_gdmoe_xlarge_24L_post': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'unilm_gdmoe_xlarge_48L': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'unilm_gdmoe_xlarge_36L': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'unilm_gdmoe_xsmall': unilm.models.unilm_gdmoe.UniLMGDMoEModel,\n",
       " 'tmoe_group': unilm.models.moe_legacy_group.TMoEModel,\n",
       " 'tmoe_group_tiny': unilm.models.moe_legacy_group.TMoEModel,\n",
       " 'tmoe_group_iwslt_de_en': unilm.models.moe_legacy_group.TMoEModel,\n",
       " 'tmoe_group_wmt_en_de': unilm.models.moe_legacy_group.TMoEModel,\n",
       " 'gdmoe': unilm.models.gdmoe_legacy.GDMoEModel,\n",
       " 'gdmoe_tiny': unilm.models.gdmoe_legacy.GDMoEModel,\n",
       " 'gdmoe_iwslt_de_en': unilm.models.gdmoe_legacy.GDMoEModel,\n",
       " 'gdmoe_wmt_en_de': unilm.models.gdmoe_legacy.GDMoEModel}"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fairseq.models.ARCH_MODEL_REGISTRY"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "不同的模型有不同的配置，比如有moe的模型我们有不同的Expert数量，这些都会作为超参数，所以我们又要定义一个类存储这些超参"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'transformer_tiny': <function fairseq.models.transformer.transformer_legacy.tiny_architecture(args)>,\n",
       " 'transformer': <function fairseq.models.transformer.transformer_legacy.base_architecture(args)>,\n",
       " 'transformer_iwslt_de_en': <function fairseq.models.transformer.transformer_legacy.transformer_iwslt_de_en(args)>,\n",
       " 'transformer_wmt_en_de': <function fairseq.models.transformer.transformer_legacy.transformer_wmt_en_de(args)>,\n",
       " 'transformer_vaswani_wmt_en_de_big': <function fairseq.models.transformer.transformer_legacy.transformer_vaswani_wmt_en_de_big(args)>,\n",
       " 'transformer_vaswani_wmt_en_fr_big': <function fairseq.models.transformer.transformer_legacy.transformer_vaswani_wmt_en_fr_big(args)>,\n",
       " 'transformer_wmt_en_de_big': <function fairseq.models.transformer.transformer_legacy.transformer_wmt_en_de_big(args)>,\n",
       " 'transformer_wmt_en_de_big_t2t': <function fairseq.models.transformer.transformer_legacy.transformer_wmt_en_de_big_t2t(args)>,\n",
       " 'transformer_align': <function fairseq.models.transformer_align.transformer_align(args)>,\n",
       " 'transformer_wmt_en_de_big_align': <function fairseq.models.transformer_align.transformer_wmt_en_de_big_align(args)>,\n",
       " 'lstm': <function fairseq.models.lstm.base_architecture(args)>,\n",
       " 'lstm_wiseman_iwslt_de_en': <function fairseq.models.lstm.lstm_wiseman_iwslt_de_en(args)>,\n",
       " 'lstm_luong_wmt_en_de': <function fairseq.models.lstm.lstm_luong_wmt_en_de(args)>,\n",
       " 'lstm_lm': <function fairseq.models.lstm_lm.base_architecture(args)>,\n",
       " 'tacotron_2': <function fairseq.models.text_to_speech.tacotron2.base_architecture(args)>,\n",
       " 'tts_transformer': <function fairseq.models.text_to_speech.tts_transformer.base_architecture(args)>,\n",
       " 'fastspeech2': <function fairseq.models.text_to_speech.fastspeech2.base_architecture(args)>,\n",
       " 'transformer_from_pretrained_xlm': <function fairseq.models.transformer_from_pretrained_xlm.base_architecture(args)>,\n",
       " 'nonautoregressive_transformer': <function fairseq.models.nat.nonautoregressive_transformer.base_architecture(args)>,\n",
       " 'nonautoregressive_transformer_wmt_en_de': <function fairseq.models.nat.nonautoregressive_transformer.nonautoregressive_transformer_wmt_en_de(args)>,\n",
       " 'nacrf_transformer': <function fairseq.models.nat.nat_crf_transformer.nacrf_base_architecture(args)>,\n",
       " 'iterative_nonautoregressive_transformer': <function fairseq.models.nat.iterative_nonautoregressive_transformer.inat_base_architecture(args)>,\n",
       " 'iterative_nonautoregressive_transformer_wmt_en_de': <function fairseq.models.nat.iterative_nonautoregressive_transformer.iter_nat_wmt_en_de(args)>,\n",
       " 'cmlm_transformer': <function fairseq.models.nat.cmlm_transformer.cmlm_base_architecture(args)>,\n",
       " 'cmlm_transformer_wmt_en_de': <function fairseq.models.nat.cmlm_transformer.cmlm_wmt_en_de(args)>,\n",
       " 'levenshtein_transformer': <function fairseq.models.nat.levenshtein_transformer.levenshtein_base_architecture(args)>,\n",
       " 'levenshtein_transformer_wmt_en_de': <function fairseq.models.nat.levenshtein_transformer.levenshtein_transformer_wmt_en_de(args)>,\n",
       " 'levenshtein_transformer_vaswani_wmt_en_de_big': <function fairseq.models.nat.levenshtein_transformer.levenshtein_transformer_vaswani_wmt_en_de_big(args)>,\n",
       " 'levenshtein_transformer_wmt_en_de_big': <function fairseq.models.nat.levenshtein_transformer.levenshtein_transformer_wmt_en_de_big_t2t(args)>,\n",
       " 'insertion_transformer': <function fairseq.models.nat.insertion_transformer.insertion_base_architecture(args)>,\n",
       " 'masked_lm': <function fairseq.models.masked_lm.base_architecture(args)>,\n",
       " 'bert_base': <function fairseq.models.masked_lm.bert_base_architecture(args)>,\n",
       " 'bert_large': <function fairseq.models.masked_lm.bert_large_architecture(args)>,\n",
       " 'xlm_base': <function fairseq.models.masked_lm.xlm_architecture(args)>,\n",
       " 'fconv_self_att': <function fairseq.models.fconv_self_att.base_architecture(args)>,\n",
       " 'fconv_self_att_wp': <function fairseq.models.fconv_self_att.fconv_self_att_wp(args)>,\n",
       " 'roberta': <function fairseq.models.roberta.model.base_architecture(args)>,\n",
       " 'roberta_prenorm': <function fairseq.models.roberta.model.roberta_prenorm_architecture(args)>,\n",
       " 'roberta_base': <function fairseq.models.roberta.model.roberta_base_architecture(args)>,\n",
       " 'roberta_large': <function fairseq.models.roberta.model.roberta_large_architecture(args)>,\n",
       " 'xlm': <function fairseq.models.roberta.model.xlm_architecture(args)>,\n",
       " 'roberta_enc_dec': <function fairseq.models.roberta.enc_dec.base_enc_dec_architecture(args)>,\n",
       " 's2t_berard': <function fairseq.models.speech_to_text.berard.berard(args)>,\n",
       " 's2t_berard_256_3_3': <function fairseq.models.speech_to_text.berard.berard_256_3_3(args)>,\n",
       " 's2t_berard_512_3_2': <function fairseq.models.speech_to_text.berard.berard_512_3_2(args)>,\n",
       " 's2t_berard_512_5_3': <function fairseq.models.speech_to_text.berard.berard_512_5_3(args)>,\n",
       " 'convtransformer': <function fairseq.models.speech_to_text.convtransformer.base_architecture(args)>,\n",
       " 'convtransformer_espnet': <function fairseq.models.speech_to_text.convtransformer.convtransformer_espnet(args)>,\n",
       " 's2t_transformer': <function fairseq.models.speech_to_text.s2t_transformer.base_architecture(args)>,\n",
       " 's2t_transformer_s': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_s(args)>,\n",
       " 's2t_transformer_xs': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_xs(args)>,\n",
       " 's2t_transformer_sp': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_sp(args)>,\n",
       " 's2t_transformer_m': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_m(args)>,\n",
       " 's2t_transformer_mp': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_mp(args)>,\n",
       " 's2t_transformer_l': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_l(args)>,\n",
       " 's2t_transformer_lp': <function fairseq.models.speech_to_text.s2t_transformer.s2t_transformer_lp(args)>,\n",
       " 'wav2vec': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'wav2vec2': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'wav2vec_ctc': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'wav2vec_seq2seq': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'xm_transformer': <function fairseq.models.speech_to_text.xm_transformer.base_architecture(args)>,\n",
       " 'fconv': <function fairseq.models.fconv.base_architecture(args)>,\n",
       " 'fconv_iwslt_de_en': <function fairseq.models.fconv.fconv_iwslt_de_en(args)>,\n",
       " 'fconv_wmt_en_ro': <function fairseq.models.fconv.fconv_wmt_en_ro(args)>,\n",
       " 'fconv_wmt_en_de': <function fairseq.models.fconv.fconv_wmt_en_de(args)>,\n",
       " 'fconv_wmt_en_fr': <function fairseq.models.fconv.fconv_wmt_en_fr(args)>,\n",
       " 'fconv_lm': <function fairseq.models.fconv_lm.base_lm_architecture(args)>,\n",
       " 'fconv_lm_dauphin_wikitext103': <function fairseq.models.fconv_lm.fconv_lm_dauphin_wikitext103(args)>,\n",
       " 'fconv_lm_dauphin_gbw': <function fairseq.models.fconv_lm.fconv_lm_dauphin_gbw(args)>,\n",
       " 'multilingual_transformer': <function fairseq.models.multilingual_transformer.base_multilingual_architecture(args)>,\n",
       " 'multilingual_transformer_iwslt_de_en': <function fairseq.models.multilingual_transformer.multilingual_transformer_iwslt_de_en(args)>,\n",
       " 'bart_large': <function fairseq.models.bart.model.bart_large_architecture(args)>,\n",
       " 'bart_base': <function fairseq.models.bart.model.bart_base_architecture(args)>,\n",
       " 'mbart_large': <function fairseq.models.bart.model.mbart_large_architecture(args)>,\n",
       " 'mbart_base': <function fairseq.models.bart.model.mbart_base_architecture(args)>,\n",
       " 'mbart_base_wmt20': <function fairseq.models.bart.model.mbart_base_wmt20_architecture(args)>,\n",
       " 'transformer_lm': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'transformer_lm_big': <function fairseq.models.transformer_lm.transformer_lm_big(args)>,\n",
       " 'transformer_lm_baevski_wiki103': <function fairseq.models.transformer_lm.transformer_lm_baevski_wiki103(args)>,\n",
       " 'transformer_lm_wiki103': <function fairseq.models.transformer_lm.transformer_lm_baevski_wiki103(args)>,\n",
       " 'transformer_lm_baevski_gbw': <function fairseq.models.transformer_lm.transformer_lm_baevski_gbw(args)>,\n",
       " 'transformer_lm_gbw': <function fairseq.models.transformer_lm.transformer_lm_baevski_gbw(args)>,\n",
       " 'transformer_lm_gpt': <function fairseq.models.transformer_lm.transformer_lm_gpt(args)>,\n",
       " 'transformer_lm_gpt2_small': <function fairseq.models.transformer_lm.transformer_lm_gpt2_small(args)>,\n",
       " 'transformer_lm_gpt2_tiny': <function fairseq.models.transformer_lm.transformer_lm_gpt2_tiny(args)>,\n",
       " 'transformer_lm_gpt2_medium': <function fairseq.models.transformer_lm.transformer_lm_gpt2_medium(args)>,\n",
       " 'transformer_lm_gpt2_big': <function fairseq.models.transformer_lm.transformer_lm_gpt2_big(args)>,\n",
       " 'transformer_lm_gpt3_small': <function fairseq.models.transformer_lm.transformer_lm_gpt3_small(args)>,\n",
       " 'transformer_lm_gpt3_medium': <function fairseq.models.transformer_lm.transformer_lm_gpt3_medium(args)>,\n",
       " 'transformer_lm_gpt3_large': <function fairseq.models.transformer_lm.transformer_lm_gpt3_large(args)>,\n",
       " 'transformer_lm_gpt3_xl': <function fairseq.models.transformer_lm.transformer_lm_gpt3_xl(args)>,\n",
       " 'transformer_lm_gpt3_2_7': <function fairseq.models.transformer_lm.transformer_lm_gpt3_2_7(args)>,\n",
       " 'transformer_lm_gpt3_6_7': <function fairseq.models.transformer_lm.transformer_lm_gpt3_6_7(args)>,\n",
       " 'transformer_lm_gpt3_13': <function fairseq.models.transformer_lm.transformer_lm_gpt3_13(args)>,\n",
       " 'transformer_lm_gpt3_175': <function fairseq.models.transformer_lm.transformer_lm_gpt3_175(args)>,\n",
       " 'lightconv': <function fairseq.models.lightconv.base_architecture(args)>,\n",
       " 'lightconv_iwslt_de_en': <function fairseq.models.lightconv.lightconv_iwslt_de_en(args)>,\n",
       " 'lightconv_wmt_en_de': <function fairseq.models.lightconv.lightconv_wmt_en_de(args)>,\n",
       " 'lightconv_wmt_en_de_big': <function fairseq.models.lightconv.lightconv_wmt_en_de_big(args)>,\n",
       " 'lightconv_wmt_en_fr_big': <function fairseq.models.lightconv.lightconv_wmt_en_fr_big(args)>,\n",
       " 'lightconv_wmt_zh_en_big': <function fairseq.models.lightconv.lightconv_wmt_zh_en_big(args)>,\n",
       " 'lightconv_lm': <function fairseq.models.lightconv_lm.base_lm_architecture(args)>,\n",
       " 'lightconv_lm_gbw': <function fairseq.models.lightconv_lm.lightconv_lm_gbw(args)>,\n",
       " 'hubert': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'hubert_ctc': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'hf_gpt2': <function fairseq.models.huggingface.hf_gpt2.default_architecture(args)>,\n",
       " 'hf_gpt2_medium': <function fairseq.models.huggingface.hf_gpt2.hf_gpt2_medium(args)>,\n",
       " 'hf_gpt2_large': <function fairseq.models.huggingface.hf_gpt2.hf_gpt2_large(args)>,\n",
       " 'hf_gpt2_xl': <function fairseq.models.huggingface.hf_gpt2.hf_gpt2_xl(args)>,\n",
       " 'convtransformer_simul_trans_espnet': <function examples.simultaneous_translation.models.convtransformer_simul_trans.convtransformer_simul_trans_espnet(args)>,\n",
       " 'convtransformer_augmented_memory': <function examples.simultaneous_translation.models.convtransformer_simul_trans.augmented_memory_convtransformer_espnet(args)>,\n",
       " 'convtransformer_emformer': <function examples.simultaneous_translation.models.convtransformer_simul_trans.convtransformer_emformer_base(args)>,\n",
       " 'transformer_monotonic': <function examples.simultaneous_translation.models.transformer_monotonic_attention.base_monotonic_architecture(args)>,\n",
       " 'transformer_monotonic_iwslt_de_en': <function examples.simultaneous_translation.models.transformer_monotonic_attention.transformer_monotonic_iwslt_de_en(args)>,\n",
       " 'transformer_monotonic_vaswani_wmt_en_de_big': <function examples.simultaneous_translation.models.transformer_monotonic_attention.transformer_monotonic_vaswani_wmt_en_de_big(args)>,\n",
       " 'transformer_monotonic_vaswani_wmt_en_fr_big': <function examples.simultaneous_translation.models.transformer_monotonic_attention.transformer_monotonic_vaswani_wmt_en_fr_big(args)>,\n",
       " 'transformer_unidirectional_iwslt_de_en': <function examples.simultaneous_translation.models.transformer_monotonic_attention.transformer_unidirectional_iwslt_de_en(args)>,\n",
       " 'transformer_monotonic_tiny': <function examples.simultaneous_translation.models.transformer_monotonic_attention.monotonic_tiny_architecture(args)>,\n",
       " 'dummy_model': <function fairseq.benchmark.dummy_model.base_architecture(args)>,\n",
       " 'transformer_lm_megatron': <function fairseq.model_parallel.models.transformer_lm.transformer_lm_megatron(args)>,\n",
       " 'transformer_lm_megatron_11b': <function fairseq.model_parallel.models.transformer_lm.transformer_lm_megatron_11b(args)>,\n",
       " 'model_parallel_roberta': <function fairseq.model_parallel.models.roberta.model.base_architecture(args)>,\n",
       " 'model_parallel_roberta_v1': <function fairseq.model_parallel.models.roberta.model.model_parallel_roberta_v1_architecture(args)>,\n",
       " 'model_parallel_roberta_postnorm': <function fairseq.model_parallel.models.roberta.model.model_parallel_roberta_postnorm_architecture(args)>,\n",
       " 'model_parallel_roberta_base': <function fairseq.model_parallel.models.roberta.model.model_parallel_roberta_base_architecture(args)>,\n",
       " 'model_parallel_roberta_large': <function fairseq.model_parallel.models.roberta.model.model_parallel_roberta_large_architecture(args)>,\n",
       " 'transformer_iwslt_de_en_pipeline_parallel': <function fairseq.model_parallel.models.pipeline_parallel_transformer.model.transformer_iwslt_de_en_dist(args)>,\n",
       " 'transformer_wmt_en_de_big_pipeline_parallel': <function fairseq.model_parallel.models.pipeline_parallel_transformer.model.transformer_wmt_en_de_big_dist(args)>,\n",
       " 'unilm': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'unilm_base': <function unilm.models.unilm.base_unilm_architecture(args)>,\n",
       " 'unilm_tiny': <function unilm.models.unilm.tiny_unilm_architecture(args)>,\n",
       " 'unilm_xlarge_24L': <function unilm.models.unilm.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_xlarge_24L_post': <function unilm.models.unilm.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_xlarge_48L': <function unilm.models.unilm.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_xlarge_36L': <function unilm.models.unilm.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gshard': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'unilm_gshard_base': <function unilm.models.unilm_gshard.base_unilm_architecture(args)>,\n",
       " 'unilm_gshard_xlarge_24L': <function unilm.models.unilm_gshard.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gshard_xlarge_24L_post': <function unilm.models.unilm_gshard.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gshard_xlarge_48L': <function unilm.models.unilm_gshard.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gshard_xlarge_36L': <function unilm.models.unilm_gshard.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'unilm_tmoe_base': <function unilm.models.unilm_tmoe.base_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_xlarge_24L': <function unilm.models.unilm_tmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_xlarge_24L_post': <function unilm.models.unilm_tmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_xlarge_48L': <function unilm.models.unilm_tmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_xlarge_36L': <function unilm.models.unilm_tmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_xsmall': <function unilm.models.unilm_tmoe.xsmall_unilm_architecture(args)>,\n",
       " 'tmoe': <function unilm.models.moe_legacy.base_architecture(args)>,\n",
       " 'tmoe_tiny': <function unilm.models.moe_legacy.tiny_architecture(args)>,\n",
       " 'tmoe_iwslt_de_en': <function unilm.models.moe_legacy.transformer_iwslt_de_en(args)>,\n",
       " 'tmoe_wmt_en_de': <function unilm.models.moe_legacy.transformer_wmt_en_de(args)>,\n",
       " 'unilm_tmoe_group': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'unilm_tmoe_group_base': <function unilm.models.unilm_tmoe_group.base_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_group_xlarge_24L': <function unilm.models.unilm_tmoe_group.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_group_xlarge_24L_post': <function unilm.models.unilm_tmoe_group.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_group_xlarge_48L': <function unilm.models.unilm_tmoe_group.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_group_xlarge_36L': <function unilm.models.unilm_tmoe_group.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_tmoe_group_xsmall': <function unilm.models.unilm_tmoe_group.xsmall_unilm_architecture(args)>,\n",
       " 'unilm_gdmoe': <function fairseq.models.register_model.<locals>.register_model_cls.<locals>.noop(_)>,\n",
       " 'unilm_gdmoe_base': <function unilm.models.unilm_gdmoe.base_unilm_architecture(args)>,\n",
       " 'unilm_gdmoe_xlarge_24L': <function unilm.models.unilm_gdmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gdmoe_xlarge_24L_post': <function unilm.models.unilm_gdmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gdmoe_xlarge_48L': <function unilm.models.unilm_gdmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gdmoe_xlarge_36L': <function unilm.models.unilm_gdmoe.xlarge_24L_unilm_architecture(args)>,\n",
       " 'unilm_gdmoe_xsmall': <function unilm.models.unilm_gdmoe.xsmall_unilm_architecture(args)>,\n",
       " 'tmoe_group': <function unilm.models.moe_legacy_group.base_architecture(args)>,\n",
       " 'tmoe_group_tiny': <function unilm.models.moe_legacy_group.tiny_architecture(args)>,\n",
       " 'tmoe_group_iwslt_de_en': <function unilm.models.moe_legacy_group.transformer_iwslt_de_en(args)>,\n",
       " 'tmoe_group_wmt_en_de': <function unilm.models.moe_legacy_group.transformer_wmt_en_de(args)>,\n",
       " 'gdmoe': <function unilm.models.gdmoe_legacy.base_architecture(args)>,\n",
       " 'gdmoe_tiny': <function unilm.models.gdmoe_legacy.tiny_architecture(args)>,\n",
       " 'gdmoe_iwslt_de_en': <function unilm.models.gdmoe_legacy.transformer_iwslt_de_en(args)>,\n",
       " 'gdmoe_wmt_en_de': <function unilm.models.gdmoe_legacy.transformer_wmt_en_de(args)>}"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fairseq.models.ARCH_CONFIG_REGISTRY"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "开始构建模型："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'_name': 'translation', 'data': '../fairseq/data-bin/wmt17_en_de', 'source_lang': None, 'target_lang': None, 'load_alignments': False, 'left_pad_source': True, 'left_pad_target': False, 'max_source_positions': 1024, 'max_target_positions': 1024, 'upsample_primary': -1, 'truncate_source': False, 'num_batch_buckets': 0, 'train_subset': 'train', 'dataset_impl': None, 'required_seq_len_multiple': 1, 'eval_bleu': False, 'eval_bleu_args': '{}', 'eval_bleu_detok': 'space', 'eval_bleu_detok_args': '{}', 'eval_tokenized_bleu': False, 'eval_bleu_remove_bpe': None, 'eval_bleu_print_samples': False}"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cfg.task"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'_name': 'cross_entropy', 'sentence_avg': False}"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cfg.criterion"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "task:tasks.FairseqTask = tasks.setup_task(cfg.task)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Namespace(no_progress_bar=False, log_interval=100, log_format=None, log_file=None, tensorboard_logdir=None, wandb_project=None, azureml_logging=False, seed=1, cpu=False, tpu=False, bf16=False, memory_efficient_bf16=False, fp16=False, memory_efficient_fp16=False, fp16_no_flatten_grads=False, fp16_init_scale=128, fp16_scale_window=None, fp16_scale_tolerance=0.0, on_cpu_convert_precision=False, min_loss_scale=0.0001, threshold_loss_scale=None, amp=False, amp_batch_retries=2, amp_init_scale=128, amp_scale_window=None, user_dir=None, empty_cache_freq=0, all_gather_list_size=16384, model_parallel_size=1, quantization_config_path=None, profile=False, reset_logging=False, suppress_crashes=False, use_plasma_view=False, plasma_path='/tmp/plasma', criterion='cross_entropy', tokenizer=None, bpe=None, optimizer=None, lr_scheduler='fixed', simul_type=None, scoring='bleu', task='translation', num_workers=1, skip_invalid_size_inputs_valid_test=False, max_tokens=None, batch_size=None, required_batch_size_multiple=8, required_seq_len_multiple=1, dataset_impl=None, data_buffer_size=10, train_subset='train', valid_subset='valid', combine_valid_subsets=None, ignore_unused_valid_subsets=False, validate_interval=1, validate_interval_updates=0, validate_after_updates=0, fixed_validation_seed=None, disable_validation=False, max_tokens_valid=None, batch_size_valid=None, max_valid_steps=None, curriculum=0, gen_subset='test', num_shards=1, shard_id=0, distributed_world_size=2, distributed_num_procs=2, distributed_rank=0, distributed_backend='nccl', distributed_init_method=None, distributed_port=-1, device_id=0, distributed_no_spawn=False, ddp_backend='pytorch_ddp', ddp_comm_hook='none', bucket_cap_mb=25, fix_batches_to_gpus=False, find_unused_parameters=False, gradient_as_bucket_view=False, fast_stat_sync=False, heartbeat_timeout=-1, broadcast_buffers=False, slowmo_momentum=None, slowmo_algorithm='LocalSGD', localsgd_frequency=3, nprocs_per_node=2, pipeline_model_parallel=False, 
pipeline_balance=None, pipeline_devices=None, pipeline_chunks=0, pipeline_encoder_balance=None, pipeline_encoder_devices=None, pipeline_decoder_balance=None, pipeline_decoder_devices=None, pipeline_checkpoint='never', zero_sharding='none', no_reshard_after_forward=False, fp32_reduce_scatter=False, cpu_offload=False, use_sharded_state=False, arch='gdmoe_wmt_en_de', max_epoch=0, max_update=0, stop_time_hours=0, clip_norm=0.0, sentence_avg=False, update_freq=[1], lr=[0.25], stop_min_lr=-1.0, use_bmuf=False, save_dir='checkpoints', restore_file='checkpoint_last.pt', finetune_from_model=None, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, optimizer_overrides='{}', save_interval=1, save_interval_updates=0, keep_interval_updates=-1, keep_interval_updates_pattern=-1, keep_last_epochs=-1, keep_best_checkpoints=-1, no_save=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_save_optimizer_state=False, best_checkpoint_metric='loss', maximize_best_checkpoint_metric=False, patience=-1, checkpoint_suffix='', checkpoint_shard_count=1, load_checkpoint_on_all_dp_ranks=False, write_checkpoints_asynchronously=False, store_ema=False, ema_decay=0.9999, ema_start_update=0, ema_seed_model=None, ema_update_freq=1, ema_fp32=False, encoder_moe_freq=0, decoder_moe_freq=0, encoder_moe_layers='3', decoder_moe_layers='3', activation_fn='relu', dropout=0.3, attention_dropout=0.1, activation_dropout=0.0, encoder_embed_dim=1024, encoder_output_dim=512, encoder_input_dim=512, encoder_ffn_embed_dim=4096, encoder_layers=6, encoder_attention_heads=16, encoder_normalize_before=False, no_encoder_final_norm=False, no_token_positional_embeddings=False, share_encoder_input_output_embed=False, encoder_learned_pos=False, layernorm_embedding=False, no_scale_embedding=False, checkpoint_activations=False, offload_activations=False, encoder_layerdrop=0.0, encoder_layers_to_keep=None, min_params_to_wrap=100000000, max_target_positions=1024, 
pooler_activation_fn='relu', pooler_dropout=0.0, task_moe=False, num_experts=2, rel_pos_buckets=0, max_rel_pos=0, rescale_init=False, ffn_layernorm=False, sharded_save=False, alternate_decoder_ffn_embed_dim=0, moe_freq=0, moe_expert_count=64, moe_gating_use_fp32=True, moe_second_expert_policy='sampling', moe_normalize_gate_prob_before_dropping=False, moe_expert_ffn_dim=None, moe_top1_expert=True, moe_eval_capacity_token_fraction=0.25, moe_normalize_expert_grad='world_size', use_moe_pad_mask=False, record_a2a_perf_stats=False, dummy_a2a=False, moe_batch_prioritized_routing=False, use_stable_embedding=False, transformer_moe_layers='', fine_tune_stage=False, fine_tune_stage_restore_model_path='', tmoe_routing_dim_reduction=True, tmoe_routing_dim=32, tmoe_routing_hard_cosine=True, insert_transformer_moe_layers='', moe_sublayers=3, moe_dropout=0.0, moe_activation_dropout=0.0, capacity_factor=2, token_shuffle=False, layer_norm_after_moe=False, exp_level_drop=0.1, dropout_interval=500, var_coef=0.0, group_num=1, coef_type=1, data='../fairseq/data-bin/wmt17_en_de', source_lang=None, target_lang=None, load_alignments=False, left_pad_source=True, left_pad_target=False, max_source_positions=1024, upsample_primary=-1, truncate_source=False, num_batch_buckets=0, eval_bleu=False, eval_bleu_args='{}', eval_bleu_detok='space', eval_bleu_detok_args='{}', eval_tokenized_bleu=False, eval_bleu_remove_bpe=None, eval_bleu_print_samples=False, force_anneal=None, lr_shrink=0.1, warmup_updates=0, pad=1, eos=2, unk=3, no_seed_provided=False, decoder_embed_dim=1024, decoder_ffn_embed_dim=4096, decoder_attention_heads=16, encoder_embed_path=None, decoder_embed_path=None, decoder_layers=6, decoder_normalize_before=False, decoder_learned_pos=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, share_decoder_input_output_embed=False, share_all_embeddings=False, adaptive_input=False, no_cross_attention=False, cross_self_attention=False, decoder_output_dim=1024, decoder_input_dim=1024, 
tie_adaptive_weights=False, decoder_layers_to_keep=None, decoder_layerdrop=0, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, _name='gdmoe_wmt_en_de')"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cfg.model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = task.build_model(cfg.model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "criterion = task.build_criterion(cfg.criterion)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "可以看到fairseq也注册了很多loss function."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'adaptive_loss': fairseq.criterions.adaptive_loss.AdaptiveLoss,\n",
       " 'composite_loss': fairseq.criterions.composite_loss.CompositeLoss,\n",
       " 'cross_entropy': fairseq.criterions.cross_entropy.CrossEntropyCriterion,\n",
       " 'ctc': fairseq.criterions.ctc.CtcCriterion,\n",
       " 'fastspeech2': fairseq.criterions.fastspeech2_loss.FastSpeech2Loss,\n",
       " 'hubert': fairseq.criterions.hubert_criterion.HubertCriterion,\n",
       " 'label_smoothed_cross_entropy': fairseq.criterions.label_smoothed_cross_entropy.LabelSmoothedCrossEntropyCriterion,\n",
       " 'latency_augmented_label_smoothed_cross_entropy': fairseq.criterions.label_smoothed_cross_entropy_latency_augmented.LatencyAugmentedLabelSmoothedCrossEntropyCriterion,\n",
       " 'label_smoothed_cross_entropy_with_alignment': fairseq.criterions.label_smoothed_cross_entropy_with_alignment.LabelSmoothedCrossEntropyCriterionWithAlignment,\n",
       " 'legacy_masked_lm_loss': fairseq.criterions.legacy_masked_lm.LegacyMaskedLmLoss,\n",
       " 'masked_lm': fairseq.criterions.masked_lm.MaskedLmLoss,\n",
       " 'model': fairseq.criterions.model_criterion.ModelCriterion,\n",
       " 'nat_loss': fairseq.criterions.nat_loss.LabelSmoothedDualImitationCriterion,\n",
       " 'sentence_prediction': fairseq.criterions.sentence_prediction.SentencePredictionCriterion,\n",
       " 'sentence_ranking': fairseq.criterions.sentence_ranking.SentenceRankingCriterion,\n",
       " 'tacotron2': fairseq.criterions.tacotron2_loss.Tacotron2Criterion,\n",
       " 'wav2vec': fairseq.criterions.wav2vec_criterion.Wav2vecCriterion,\n",
       " 'vocab_parallel_cross_entropy': fairseq.model_parallel.criterions.vocab_parallel_cross_entropy.VocabParallelCrossEntropyCriterion,\n",
       " 'label_smoothed_cross_entropy_moe': unilm.criterions.label_smoothed_cross_entropy_moe.LabelSmoothedCrossEntropyCriterion,\n",
       " 'unilm': unilm.criterions.unilm.UniLmLoss,\n",
       " 'unilm_moe': unilm.criterions.unilm_moe.UniLmMoeLoss}"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fairseq.criterions.CRITERION_REGISTRY"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "看看参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "task: TranslationTask\n",
      "model: GDMoEModel\n",
      "criterion: CrossEntropyCriterion\n",
      "num. shared model params: 288,458,754 (num. trained: 288,458,754)\n",
      "num. expert model params: 3,223,977,984 (num. trained: 3,223,977,984)\n"
     ]
    }
   ],
   "source": [
    "print(\"task: {}\".format(task.__class__.__name__))\n",
    "print(\"model: {}\".format(model.__class__.__name__))\n",
    "print(\"criterion: {}\".format(criterion.__class__.__name__))\n",
    "print(\n",
    "    \"num. shared model params: {:,} (num. trained: {:,})\".format(\n",
    "        sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False)),\n",
    "        sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False) and p.requires_grad)\n",
    "    )\n",
    ")\n",
    "\n",
    "print(\n",
    "    \"num. expert model params: {:,} (num. trained: {:,})\".format(\n",
    "        sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False)),\n",
    "        sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False) and p.requires_grad),\n",
    "    )\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "可以看到，模型主干(backbone)有288M参数，然后Expert参数占了3.2B."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GDMoEModel(\n",
       "  (encoder): TransformerEncoderMoEBase(\n",
       "    (dropout_module): FairseqDropout()\n",
       "    (embed_tokens): Embedding(40360, 1024, padding_idx=1)\n",
       "    (embed_positions): SinusoidalPositionalEmbedding()\n",
       "    (layers): ModuleList(\n",
       "      (0-2): 3 x TransformerEncoderLayerBase(\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (dropout_module): FairseqDropout()\n",
       "        (activation_dropout_module): FairseqDropout()\n",
       "        (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "        (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "        (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "      (3): UniLMMoeLayer(\n",
       "        (dropout_module): FairseqDropout()\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (moe_layer): MOELayer(\n",
       "          (gate): Top1Gate(\n",
       "            (wg_reduction): Linear(in_features=1024, out_features=32, bias=False)\n",
       "            (wg): Linear(in_features=32, out_features=64, bias=False)\n",
       "          )\n",
       "          (experts): ModuleList(\n",
       "            (0-63): 64 x NFeedForwardNetwork(\n",
       "              (expert_network): ModuleList(\n",
       "                (0-2): 3 x MoESublayer(\n",
       "                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "                  (dropout_module): FairseqDropout()\n",
       "                  (activation_dropout_module): FairseqDropout()\n",
       "                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "                )\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "      (4-5): 2 x TransformerEncoderLayerBase(\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (dropout_module): FairseqDropout()\n",
       "        (activation_dropout_module): FairseqDropout()\n",
       "        (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "        (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "        (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (decoder): TransformerDecoderMoEBase(\n",
       "    (dropout_module): FairseqDropout()\n",
       "    (embed_tokens): Embedding(42720, 1024, padding_idx=1)\n",
       "    (embed_positions): SinusoidalPositionalEmbedding()\n",
       "    (layers): ModuleList(\n",
       "      (0-2): 3 x TransformerDecoderLayerBase(\n",
       "        (dropout_module): FairseqDropout()\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (activation_dropout_module): FairseqDropout()\n",
       "        (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (encoder_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (encoder_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "        (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "        (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "      (3): TransformerDecoderLayerMoEBase(\n",
       "        (dropout_module): FairseqDropout()\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (activation_dropout_module): FairseqDropout()\n",
       "        (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (encoder_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (encoder_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (moe_layer): MOELayer(\n",
       "          (gate): Top1Gate(\n",
       "            (wg_reduction): Linear(in_features=1024, out_features=32, bias=False)\n",
       "            (wg): Linear(in_features=32, out_features=64, bias=False)\n",
       "          )\n",
       "          (experts): ModuleList(\n",
       "            (0-63): 64 x NFeedForwardNetwork(\n",
       "              (expert_network): ModuleList(\n",
       "                (0-2): 3 x MoESublayer(\n",
       "                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "                  (dropout_module): FairseqDropout()\n",
       "                  (activation_dropout_module): FairseqDropout()\n",
       "                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "                )\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "        )\n",
       "        (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "      (4-5): 2 x TransformerDecoderLayerBase(\n",
       "        (dropout_module): FairseqDropout()\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (activation_dropout_module): FairseqDropout()\n",
       "        (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (encoder_attn): MultiheadAttention(\n",
       "          (dropout_module): FairseqDropout()\n",
       "          (k_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (v_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (q_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out_proj): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (encoder_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (fc1): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "        (fc2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "        (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "    )\n",
       "    (output_projection): Linear(in_features=1024, out_features=42720, bias=False)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 坏了！并不是每层都有MoE！\n",
    "\n",
    "- 在Transformer的Encoder和Decoder的第四层是MoE层，其余的都不是MoE层。\n",
    "\n",
    "- 看看源码`unilm/models/gdmoe_legacy.py`\n",
    "\n",
    "    ```python\n",
    "    args.encoder_embed_dim = 1024\n",
    "    args.encoder_ffn_embed_dim = 4096\n",
    "    args.encoder_attention_heads = 16\n",
    "    args.encoder_normalize_before = False\n",
    "    args.decoder_embed_dim = 1024\n",
    "    args.decoder_ffn_embed_dim = 4096\n",
    "    args.decoder_attention_heads = 16\n",
    "    args.dropout = 0.3\n",
    "    args.attention_dropout = 0.1\n",
    "    ```\n",
    "    \n",
    "    我们可以看到是可以跟上面model的参数对上的\n",
    "\n",
    "    ```python\n",
    "    class TransformerEncoderMoEBase(TransformerEncoderBase):\n",
    "        def __init__(self, cfg, dictionary, embed_tokens):\n",
    "            super().__init__(cfg, dictionary, embed_tokens)\n",
    "\n",
    "            # rebuild encoder layer\n",
    "            if self.encoder_layerdrop > 0.0:\n",
    "                self.layers = LayerDropModuleList(p=self.encoder_layerdrop)\n",
    "            else:\n",
    "                self.layers = nn.ModuleList([])\n",
    "            moe_freq = max(getattr(cfg, 'encoder_moe_freq', 0), getattr(cfg, 'moe_freq', 0))\n",
    "            # for 循环构建encoder\n",
    "            for i in range(cfg.encoder_layers):\n",
    "                # 判断是不是moe_layer\n",
     "                # 由于我们指定 --encoder-moe-layers 3，这里的3是从0开始的index，所以第四层(index=3)就是MoE层\n",
    "                if cfg.encoder_moe_layers:\n",
    "                    transformer_moe_layers = cfg.encoder_moe_layers.split(',')\n",
    "                    is_moe_layer = str(i) in transformer_moe_layers\n",
    "                else:\n",
    "                    is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0\n",
    "                # 最后将is_moe_layer变量传入构建函数里面\n",
    "                self.layers.append(self.build_encoder_layer(cfg, is_moe_layer=is_moe_layer))\n",
    "\n",
    "            self.num_layers = len(self.layers)\n",
    "\n",
    "        # 这是构建单个Encoder Layer的函数，需要传入config和是否是MoE层\n",
    "        def build_encoder_layer(self, cfg, is_moe_layer=False):\n",
    "            if is_moe_layer:\n",
    "                layer = UniLMMoeLayer(cfg)\n",
    "            else:\n",
    "                layer = transformer_layer.TransformerEncoderLayerBase(cfg)\n",
    "            checkpoint = cfg.checkpoint_activations\n",
    "            if checkpoint:\n",
    "                offload_to_cpu = cfg.offload_activations\n",
    "                layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)\n",
    "            # if we are checkpointing, enforce that FSDP always wraps the\n",
    "            # checkpointed layer, regardless of layer size\n",
    "            min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0\n",
    "            layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)\n",
    "            return layer\n",
    "    ```\n",
    "\n",
    "- 同样地，Decoder与Encoder的构建函数类似。\n",
     "- 至于具体是怎么构建的，推荐直接在这个ipynb里给构建函数打上断点，单步看看到底是怎么执行的"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "moe",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
