# Copyright 2024 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unused arguments recorded here."""


# Model-parallel configuration argument names recorded as unused here.
# Kept as a set for O(1) membership checks; entries grouped by name prefix.
UNUSED_MODEL_PARALLEL_CONFIG = {
    # misc / initialization
    "moe_extended_tp", "perform_initialization", "timers",
    # gradient / synchronization hook names
    "finalize_model_grads_func", "grad_scale_func", "no_sync_func",
    "grad_sync_func", "param_sync_func",
    # autocast
    "enable_autocast", "autocast_dtype",
    # activation checkpointing / allreduce / RNG tracker
    "num_microbatches_with_partial_activation_checkpoints",
    "async_tensor_model_parallel_allreduce", "use_te_rng_tracker",
    # tp_comm_* tensor-parallel communication knobs
    "tp_comm_overlap", "tp_comm_bulk_wgrad", "tp_comm_bulk_dgrad",
    "tp_comm_overlap_ag", "tp_comm_overlap_rs", "tp_comm_overlap_rs_dgrad",
    "tp_comm_split_ag", "tp_comm_atomic_ag", "tp_comm_split_rs",
    "tp_comm_atomic_rs",
    # fusion / pipeline communication
    "cross_entropy_loss_fusion", "batch_p2p_comm", "batch_p2p_sync",
    "use_ring_exchange_p2p", "deallocate_pipeline_outputs",
    "defer_embedding_wgrad_compute", "wgrad_deferral_limit",
    "pipeline_model_parallel_split_rank",
    # cpu_offloading_* settings
    "cpu_offloading", "cpu_offloading_num_layers", "_cpu_offloading_context",
    "cpu_offloading_activations", "cpu_offloading_weights",
    # timing barrier
    "barrier_with_L1_time",
}


# Transformer configuration argument names recorded as unused here.
# Kept as a set for O(1) membership checks; entries grouped by name prefix.
UNUSED_TRANSFORMER_CONFIG = {
    # normalization / activation related
    "activation_func_fp8_input_store", "qk_layernorm",
    "bias_activation_fusion", "persist_layer_norm",
    "memory_efficient_layer_norm",
    # fp8_* settings
    "fp8_margin", "fp8_interval", "fp8_amax_history_len",
    "fp8_amax_compute_algo", "fp8_wgrad", "fp8_dot_product_attention",
    "fp8_multi_head_attention",
    # moe_* settings
    "moe_router_pre_softmax", "moe_token_dropping", "moe_per_layer_logging",
    "moe_layer_recompute",
    # misc
    "disable_parameter_transpose_cache", "enable_cuda_graph",
}


# Global argument names recorded as unused here.
# Kept as a set for O(1) membership checks; entries grouped by name prefix.
UNUSED_GLOBAL_ARGS = {
    # model / architecture flags
    "add_position_embedding", "make_vocab_size_divisible_by", "squared_relu",
    "bert_binary_head",
    # straggler_* detection
    "log_straggler", "disable_straggler_on_startup", "straggler_ctrlr_port",
    "straggler_minmax_count",
    # one_logger_* / app tagging
    "enable_one_logger", "one_logger_project", "one_logger_run_name",
    "one_logger_async", "app_tag_run_name", "app_tag_run_version",
    # log_* / timing_* options
    "log_params_norm", "log_num_zeros_in_grad", "log_throughput",
    "log_progress", "timing_log_level", "timing_log_option",
    # tensorboard-related logging
    "tensorboard_log_interval", "tensorboard_queue_size",
    "log_timers_to_tensorboard", "log_batch_size_to_tensorboard",
    "log_learning_rate_to_tensorboard", "log_loss_scale_to_tensorboard",
    "log_validation_ppl_to_tensorboard", "log_memory_to_tensorboard",
    "log_world_size_to_tensorboard",
    # wandb_* options
    "wandb_project", "wandb_exp_name", "wandb_save_dir",
    # optimizer / batch sizing
    "adam_eps", "sgd_momentum", "batch_size", "rampup_batch_size",
    # training behaviour / misc runtime flags
    "recompute_activations", "check_for_nan_in_loss_and_grad",
    "tp_comm_overlap_cfg", "empty_unused_memory_level",
    "check_weight_hash_across_dp_replicas_interval",
    "checkpoint_activations", "exit_interval", "exit_signal_handler",
    "tensorboard_dir", "bias_gelu_fusion", "bias_swiglu_fusion",
    "dataloader_type", "no_persist_layer_norm",
    # manual_gc_* / initialization
    "manual_gc", "manual_gc_interval", "manual_gc_eval",
    "data_parallel_random_init", "init_method_xavier_uniform",
    # learning-rate schedule
    "warmup", "decoupled_lr", "decoupled_min_lr",
    # checkpoint save / load
    "ckpt_step", "perform_initialization", "use_checkpoint_args",
    "auto_detect_ckpt_format", "ckpt_fully_parallel_save",
    "no_ckpt_fully_parallel_save", "async_save", "ckpt_fully_parallel_load",
    "ckpt_assume_constant_structure", "dist_ckpt_strictness",
    "min_loss_scale",
    # parallelism / distributed setup
    "model_parallel_size", "num_layers_per_virtual_pipeline_stage",
    "distributed_backend", "distributed_timeout_minutes",
    "delay_grad_reduce", "delay_param_gather",
    "scatter_gather_tensors_in_pipeline", "use_ring_exchange_p2p",
    "local_rank", "lazy_mpu_init", "nccl_communicator_config_path",
    "use_tp_pp_dp_mapping", "skip_train",
    # dataset paths / dataloading
    "split", "train_data_path", "valid_data_path", "test_data_path",
    "data_cache_path", "mmap_bin_files", "mock_data", "vocab_file",
    "merge_file", "vocab_extra_ids", "retriever_seq_length", "sample_rate",
    "mask_prob", "short_seq_prob", "num_workers",
    # tokenizer / tiktoken_* options
    "tokenizer_type", "tokenizer_model", "tiktoken_pattern",
    "tiktoken_num_special_tokens", "tiktoken_special_tokens",
    # masking / dataset construction
    "reset_position_ids", "reset_attention_mask", "eod_mask_loss",
    "create_attention_mask_in_dataloader", "num_dataset_builder_threads",
    "s3_cache_path",
    # adlr_autoresume_* options
    "adlr_autoresume", "adlr_autoresume_interval",
    # biencoder / retriever / indexer options
    "ict_head_size", "biencoder_projection_dim",
    "biencoder_shared_query_context_model", "ict_load", "bert_load",
    "titles_data_path", "query_in_block_prob", "use_one_sent_docs",
    "evidence_data_path", "retriever_report_topk_accuracies",
    "retriever_score_scaling", "block_data_path", "embedding_path",
    "indexer_batch_size", "indexer_log_interval",
    # vision-related options
    "num_classes", "img_h", "img_w", "num_channels", "patch_dim",
    "classes_fraction", "data_per_class_fraction", "data_sharding",
    "head_lr_mult", "vision_pretraining", "vision_pretraining_type",
    "vision_backbone_type", "swin_backbone_type", "mask_type", "mask_factor",
    "iter_per_epoch",
    # dino_* options
    "dino_local_img_size", "dino_local_crops_number", "dino_head_hidden_size",
    "dino_bottleneck_size", "dino_freeze_last_layer", "dino_norm_last_layer",
    "dino_warmup_teacher_temp", "dino_teacher_temp",
    "dino_warmup_teacher_temp_epochs",
    # spec / hybrid_* options
    "spec", "hybrid_attention_ratio", "hybrid_mlp_ratio",
    "hybrid_override_pattern",
}
