# Copyright (c) 2025, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from nemo.collections.llm.recipes import (
    baichuan2_7b,
    bert_110m,
    bert_340m,
    chatglm3_6b,
    deepseek_v2,
    deepseek_v2_lite,
    deepseek_v3,
    e5_340m,
    gemma2,
    gemma2_2b,
    gemma2_9b,
    gemma2_27b,
    gemma3_1b,
    gemma_2b,
    gemma_7b,
    gpt3_175b,
    gpt_oss_20b,
    gpt_oss_120b,
    hyena_1b,
    hyena_7b,
    hyena_40b,
    hyena_base,
    llama2_7b,
    llama3_8b,
    llama3_8b_16k,
    llama3_8b_64k,
    llama3_8b_128k,
    llama3_70b,
    llama3_70b_16k,
    llama3_70b_64k,
    llama4_e16,
    llama4_e128,
    llama31_8b,
    llama31_70b,
    llama31_405b,
    llama31_nemotron_70b,
    llama31_nemotron_nano_8b,
    llama31_nemotron_ultra_253b,
    llama32_1b,
    llama32_3b,
    llama33_nemotron_super_49b,
    llama_embedding_1b,
    llama_embedding_3b,
    llama_reranker_1b,
    mamba2_1_3b,
    mamba2_2_7b,
    mamba2_8b,
    mamba2_130m,
    mamba2_370m,
    mamba2_780m,
    mamba2_hybrid_8b,
    mistral_7b,
    mistral_nemo_12b,
    mistral_small3_24b,
    mixtral_8x7b,
    mixtral_8x7b_16k,
    mixtral_8x7b_64k,
    mixtral_8x22b,
    mixtral_8x22b_64k,
    nemotron,
    nemotron3_4b,
    nemotron3_8b,
    nemotron3_22b,
    nemotron3_22b_16k,
    nemotron3_22b_64k,
    nemotron4_15b,
    nemotron4_15b_16k,
    nemotron4_15b_64k,
    nemotron4_340b,
    nemotron_nano_9b_v2,
    nemotron_nano_12b_v2,
    nemotronh_4b,
    nemotronh_8b,
    nemotronh_47b,
    nemotronh_56b,
    phi3_mini_4k_instruct,
    qwen2,
    qwen2_1p5b,
    qwen2_7b,
    qwen2_72b,
    qwen2_500m,
    qwen3,
    qwen3_1p7b,
    qwen3_4b,
    qwen3_8b,
    qwen3_14b,
    qwen3_30b_a3b,
    qwen3_32b,
    qwen3_235b_a22b,
    qwen3_600m,
    qwen25_1p5b,
    qwen25_7b,
    qwen25_14b,
    qwen25_32b,
    qwen25_72b,
    qwen25_500m,
    starcoder2,
    starcoder2_3b,
    starcoder2_7b,
    starcoder2_15b,
    starcoder_15b,
    t5_3b,
    t5_11b,
    t5_220m,
)
from nemo.collections.llm.recipes.log.default import default_log, default_resume
from nemo.collections.llm.recipes.optim import adam, sgd
from nemo.collections.llm.recipes.run.executor import torchrun

# Public API of the recipes package.
#
# NOTE: kept in the same (isort) order as the recipe imports above so the two
# lists stay easy to diff and maintain together; the optimizer / logging /
# executor helpers are appended at the end.  The exported *set* of names is
# unchanged — only the ordering is normalized (ruff RUF022).
__all__ = [
    "baichuan2_7b",
    "bert_110m",
    "bert_340m",
    "chatglm3_6b",
    "deepseek_v2",
    "deepseek_v2_lite",
    "deepseek_v3",
    "e5_340m",
    "gemma2",
    "gemma2_2b",
    "gemma2_9b",
    "gemma2_27b",
    "gemma3_1b",
    "gemma_2b",
    "gemma_7b",
    "gpt3_175b",
    "gpt_oss_20b",
    "gpt_oss_120b",
    "hyena_1b",
    "hyena_7b",
    "hyena_40b",
    "hyena_base",
    "llama2_7b",
    "llama3_8b",
    "llama3_8b_16k",
    "llama3_8b_64k",
    "llama3_8b_128k",
    "llama3_70b",
    "llama3_70b_16k",
    "llama3_70b_64k",
    "llama4_e16",
    "llama4_e128",
    "llama31_8b",
    "llama31_70b",
    "llama31_405b",
    "llama31_nemotron_70b",
    "llama31_nemotron_nano_8b",
    "llama31_nemotron_ultra_253b",
    "llama32_1b",
    "llama32_3b",
    "llama33_nemotron_super_49b",
    "llama_embedding_1b",
    "llama_embedding_3b",
    "llama_reranker_1b",
    "mamba2_1_3b",
    "mamba2_2_7b",
    "mamba2_8b",
    "mamba2_130m",
    "mamba2_370m",
    "mamba2_780m",
    "mamba2_hybrid_8b",
    "mistral_7b",
    "mistral_nemo_12b",
    "mistral_small3_24b",
    "mixtral_8x7b",
    "mixtral_8x7b_16k",
    "mixtral_8x7b_64k",
    "mixtral_8x22b",
    "mixtral_8x22b_64k",
    "nemotron",
    "nemotron3_4b",
    "nemotron3_8b",
    "nemotron3_22b",
    "nemotron3_22b_16k",
    "nemotron3_22b_64k",
    "nemotron4_15b",
    "nemotron4_15b_16k",
    "nemotron4_15b_64k",
    "nemotron4_340b",
    "nemotron_nano_9b_v2",
    "nemotron_nano_12b_v2",
    "nemotronh_4b",
    "nemotronh_8b",
    "nemotronh_47b",
    "nemotronh_56b",
    "phi3_mini_4k_instruct",
    "qwen2",
    "qwen2_1p5b",
    "qwen2_7b",
    "qwen2_72b",
    "qwen2_500m",
    "qwen3",
    "qwen3_1p7b",
    "qwen3_4b",
    "qwen3_8b",
    "qwen3_14b",
    "qwen3_30b_a3b",
    "qwen3_32b",
    "qwen3_235b_a22b",
    "qwen3_600m",
    "qwen25_1p5b",
    "qwen25_7b",
    "qwen25_14b",
    "qwen25_32b",
    "qwen25_72b",
    "qwen25_500m",
    "starcoder2",
    "starcoder2_3b",
    "starcoder2_7b",
    "starcoder2_15b",
    "starcoder_15b",
    "t5_3b",
    "t5_11b",
    "t5_220m",
    # Optimizer, logging, and executor helpers (imported below the recipes).
    "adam",
    "sgd",
    "default_log",
    "default_resume",
    "torchrun",
]
