# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
# This code is inspired by the LlamaFactory's cli feature.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import subprocess
import sys
import random

from openmind.utils.constants import Command
from openmind.cli.chat import run_chat
from openmind.cli.env import run_env
from openmind.archived.cli_legacy.model_cli import run_pull, run_push, run_rm, run_list
from openmind.archived.cli_legacy.pipeline_cli import run_pipeline
from openmind.utils import is_torch_available

# Compatible with MindSpore
if is_torch_available():
    import torch
    from accelerate import PartialState
    from openmind.cli import train
    from openmind.cli.export import run_export
    from openmind.cli.deploy import run_deploy
    from openmind.cli.eval.eval import run_eval


def get_device_count():
    """Return the number of accelerator devices visible to this process.

    Uses accelerate's ``PartialState`` to discover the active device type
    (e.g. ``cuda`` or ``npu``), then asks the matching ``torch`` submodule
    for its ``device_count``. Returns 0 when no such module exists or it
    does not expose a ``device_count`` callable.
    """
    device_type = PartialState().device.type.lower()
    backend = getattr(torch, device_type, None)
    if backend is None or not hasattr(backend, "device_count"):
        return 0
    return backend.device_count()


def main():
    """Dispatch the openMind CLI subcommand named in ``sys.argv[1]``.

    For ``train``, relaunches the training entry point under ``torchrun``
    with distributed parameters taken from the environment; every other
    supported command is forwarded to its dedicated ``run_*`` handler.

    Raises:
        ValueError: if no subcommand is given, if ``train`` is requested
            but no accelerator device is available, or if the subcommand
            is not supported.
    """
    # Guard against bare invocation: a plain IndexError from sys.argv[1]
    # would be unhelpful to the user.
    if len(sys.argv) < 2:
        raise ValueError("A command is required, e.g. `openmind-cli train ...`")
    command_cli = sys.argv[1]
    if command_cli == Command.TRAIN:
        # Query the device count once; it is reused both for the launch
        # decision and as the default value of --nproc_per_node.
        device_count = get_device_count()
        if device_count >= 1:
            master_addr = os.environ.get("MASTER_ADDR", "127.0.0.1")
            # NOTE(review): the random fallback port may collide with a
            # port already in use — confirm whether retry logic is needed.
            master_port = os.environ.get("MASTER_PORT", str(random.randint(20001, 29999)))
            command = [
                "torchrun",
                "--nnodes",
                os.environ.get("NNODES", "1"),
                "--node_rank",
                os.environ.get("RANK", "0"),
                "--nproc_per_node",
                os.environ.get("NPROC_PER_NODE", str(device_count)),
                "--master_addr",
                master_addr,
                "--master_port",
                master_port,
                train.__file__,
            ] + sys.argv[2:]  # forward remaining CLI args to the training script
            subprocess.run(command)
        else:
            raise ValueError("There is no npu devices to launch finetune workflow")
    # The elif chain below is deliberate: run_eval/run_deploy/run_export are
    # only imported when torch is available, so an eagerly-built dispatch
    # dict would raise NameError under MindSpore even for other commands.
    elif command_cli == Command.LIST:
        run_list()
    elif command_cli == Command.EVAL:
        run_eval()
    elif command_cli == Command.PULL:
        run_pull()
    elif command_cli == Command.PUSH:
        run_push()
    elif command_cli == Command.RM:
        run_rm()
    elif command_cli == Command.CHAT:
        run_chat()
    elif command_cli == Command.RUN:
        run_pipeline()
    elif command_cli == Command.ENV:
        run_env()
    elif command_cli == Command.DEPLOY:
        run_deploy()
    elif command_cli == Command.EXPORT:
        run_export()
    else:
        raise ValueError(f"Currently command {command_cli} is not supported")


# Script entry point: run the CLI dispatcher when executed directly.
if __name__ == "__main__":
    main()
