# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
# This code is inspired by the LlamaFactory's cli feature.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import subprocess
import sys
import random

from openmind.utils.constants import COMMANDS
from openmind.cli.chat import run_chat
from openmind.cli.env import run_env
from openmind.archived.cli_legacy.model_cli import run_pull, run_push, run_rm, run_list
from openmind.archived.cli_legacy.pipeline_cli import run_pipeline
from openmind.utils import is_torch_available
from openmind.utils.arguments_utils import print_formatted_table

# Guard torch-dependent imports so the CLI still loads in MindSpore-only
# environments where torch (and the torch-based sub-commands) are unavailable.
if is_torch_available():
    import torch
    from accelerate import PartialState
    from openmind.cli import train
    from openmind.cli.export import run_export
    from openmind.cli.deploy import run_deploy
    from openmind.cli.eval import run_eval


def get_device_count():
    """Return the number of accelerator devices visible to this process.

    The active backend (e.g. ``cuda``, ``npu``) is resolved through
    Accelerate's ``PartialState`` and mapped onto the matching ``torch``
    device module; 0 is returned when no such module (or its
    ``device_count`` attribute) exists.
    """
    backend = PartialState().device.type.lower()
    module = getattr(torch, backend, None)
    if module is None or not hasattr(module, "device_count"):
        return 0
    return module.device_count()


def run_train():
    """Launch the fine-tuning workflow through ``torchrun``.

    Distributed-launch settings (node count, node rank, processes per node,
    master address/port) are read from the environment with single-node
    defaults, and all remaining CLI arguments are forwarded to the training
    entry script.

    Raises:
        ValueError: if no accelerator device is available.
        SystemExit: with torchrun's return code when the launch fails.
    """
    # Hoisted: the original called get_device_count() twice, constructing a
    # PartialState on each call.
    device_count = get_device_count()
    if device_count < 1:
        raise ValueError("There is no npu devices to launch finetune workflow")

    master_addr = os.environ.get("MASTER_ADDR", "127.0.0.1")
    # A random high port reduces collisions when several jobs share a host.
    master_port = os.environ.get("MASTER_PORT", str(random.randint(20001, 29999)))
    command = [
        "torchrun",
        "--nnodes",
        os.environ.get("NNODES", "1"),
        "--node_rank",
        os.environ.get("RANK", "0"),
        "--nproc_per_node",
        os.environ.get("NPROC_PER_NODE", str(device_count)),
        "--master_addr",
        master_addr,
        "--master_port",
        master_port,
        train.__file__,
    ] + sys.argv[2:]
    # Propagate a failed launch to the caller/scheduler instead of silently
    # exiting 0; the success path is unchanged (function returns normally).
    returncode = subprocess.run(command).returncode
    if returncode != 0:
        sys.exit(returncode)


def print_help():
    """Print a formatted table listing every supported CLI command."""
    rows = [[entry.cmd, entry.desc] for entry in COMMANDS.values()]
    print_formatted_table(rows, ["Commands", "Description"])


def main():
    """CLI entry point: route the first positional argument to a sub-command.

    With no argument the command table is printed; an unrecognized command
    prints the table and raises ``ValueError``.
    """
    command_cli = sys.argv[1] if len(sys.argv) != 1 else None
    # Route table, checked in order. Handlers are wrapped in lambdas so the
    # torch-guarded names (run_train, run_eval, run_deploy, run_export) are
    # only resolved when their command actually matches, and COMMANDS[key]
    # is only accessed as far down the table as needed — both exactly as the
    # original if/elif chain behaved.
    routes = (
        ("TRAIN", lambda: run_train()),
        ("LIST", lambda: run_list()),
        ("EVAL", lambda: run_eval()),
        ("PULL", lambda: run_pull()),
        ("PUSH", lambda: run_push()),
        ("RM", lambda: run_rm()),
        ("CHAT", lambda: run_chat()),
        ("RUN", lambda: run_pipeline()),
        ("ENV", lambda: run_env()),
        ("DEPLOY", lambda: run_deploy()),
        ("EXPORT", lambda: run_export()),
    )
    for key, handler in routes:
        if command_cli == COMMANDS[key].cmd:
            handler()
            return
    # No match: always show the table; only raise for a truthy unknown command.
    print_help()
    if command_cli:
        raise ValueError(
            f"Currently command {command_cli} is not supported. Please refer to the table above to provide the correct command."
        )


# Allow invoking this module directly (python -m / script path) in addition
# to whatever installed console entry point calls main().
if __name__ == "__main__":
    main()
