# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# This code is inspired by the LLaMA-Factory.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
from openmind.flow.arguments import initialize_openmind, get_args
from openmind.flow.chat.chat_model import _start_chat_without_docker


def run_chat(**kwargs) -> None:
    """Entry point for the ``openmind-cli chat`` command.

    Inspects the raw command line to decide how configuration is supplied,
    initializes openMind accordingly, then starts an interactive chat
    session without docker.

    Args:
        kwargs: Extra options forwarded to ``initialize_openmind`` only in
            the explicit-flags branch (``--model_name_or_path ...``).
            NOTE(review): kwargs are silently ignored in the yaml-file and
            positional-model branches — confirm this is intended.

    Returns:
        None. Side effect: launches the chat loop.
    """
    # Fix: the original checked `.endswith("yaml")`, which matches any name
    # merely ending in the letters "yaml" (e.g. "configyaml") and rejects
    # the common ".yml" extension. Require a real extension instead.
    if len(sys.argv) == 3 and sys.argv[-1].endswith((".yaml", ".yml")):
        # openmind-cli chat chat_demo.yaml
        yaml_file = sys.argv[-1]
        initialize_openmind(yaml_file)
    elif len(sys.argv) > 2 and ("/" in sys.argv[2] or os.path.exists(sys.argv[2])):
        # openmind-cli chat AI_Connect/Qwen2-0.5B or openmind-cli chat AI_Connect/Qwen2-0.5B --do_sample True
        # The positional model id is parsed by initialize_openmind itself;
        # unknown extra flags are tolerated.
        initialize_openmind(ignore_unknown_args=True)
    else:
        # openmind-cli chat --model_name_or_path AI_Connect/Qwen2-0.5B --do_sample True
        initialize_openmind(**kwargs)

    args = get_args()

    _start_chat_without_docker(args)
