# encoding: utf-8
"""
llm服务启动
"""
import logging
import subprocess

from ...config.base import (
    LLM_CUDA_VISIBLE_DEVICES,
    LLM_MODEL_DIR,
    LOG_LLM_SERVER_DIR,
    PYTHON_VENV,
    USE_VLLM,
)

LOG_FORMAT = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(format=LOG_FORMAT)

SERVER_WORKER_PARAMS = {
    'controller_host': '0.0.0.0',
    'controller_port': '21001',
    'worker_host': '0.0.0.0',
    'worker_port': '21002',
    'server_host': '0.0.0.0',
    'server_port': '8000',
    'gradio_host': '0.0.0.0',
    'gradio_port': '8002',
    'num_gpus': 1,
    'device':  'gpu',
    'load_8bit': False,
}

'''
0 python evn
1 controller, model_worker, openai_api_server, vllm_worker
2 命令行选项
3 LOG_PATH
4 log的文件名
'''
base_launch_sh = "nohup {0} -m fastchat.serve.{1} {2} > {3}/{4}.log 2>&1 &"


'''
0 log_path
1 log的文件名，必须与bash_launch_sh一致
2 controller, worker, openai_api_server
'''
base_check_sh = """while [ `grep -c "Uvicorn running on" {0}/{1}.log` -eq '0' ];do
                        sleep 5s;
                        echo "wait {2} running"
                done
                echo '{2} running' """


def string_args(args):
    string_list = []
    for key, value in args.items():
        parse_string = '--' + '-'.join(key.split('_')) + ' ' + str(value)
        string_list.append(parse_string)
    strings = ' '.join(string_list)
    return strings


def launch_worker(idx, model_name, cuda_visible_devices=LLM_CUDA_VISIBLE_DEVICES):
    """
    :param idx: worker 编号 启动多少个编码
    :param model_name:
    :param cuda_visible_devices: gpu编号
    :return:
    """
    local_model_path = LLM_MODEL_DIR + '/' + model_name
    worker_params = {
        'host': SERVER_WORKER_PARAMS['worker_host'],
        'port': SERVER_WORKER_PARAMS['worker_port'],
        'model_path': local_model_path,
    }
    worker_cli = 'vllm_worker' if USE_VLLM else 'model_worker'
    log_name = 'log_worker_' + model_name.split('/')[-1] + '_' + str(idx)
    server_args = string_args(worker_params) + ' --limit-worker-concurrency 20'
    server_dtype = ' --dtype bfloat16' if 'Int' not in model_name else ' --dtype float16'
    # server_args += ' --trust-remote-code ' + ' --gpu-memory-utilization 0.8' + ' --max-model-len 8000' + ' --max-num-batched-tokens 160000' + server_dtype if USE_VLLM else server_dtype
    server_args += ' --trust-remote-code ' + ' --gpu-memory-utilization 0.8' + ' --max-model-len 6000' + server_dtype if USE_VLLM else server_dtype
    print(server_args)
    worker_sh = base_launch_sh.format(PYTHON_VENV, worker_cli, server_args, LOG_LLM_SERVER_DIR, log_name)
    if cuda_visible_devices is not None:
        worker_sh = f'CUDA_VISIBLE_DEVICES={cuda_visible_devices} ' + worker_sh
    worker_check_sh = base_check_sh.format(LOG_LLM_SERVER_DIR, log_name, "model_worker", "model_worker")
    subprocess.run(worker_sh, shell=True, check=True)
    subprocess.run(worker_check_sh, shell=True, check=True)


def launch_gradio_sever():
    gradio_params = {
        'host': SERVER_WORKER_PARAMS['gradio_host'],
        'port': SERVER_WORKER_PARAMS['gradio_port']
    }
    log_name = 'gradio_web_server'
    gradio_sh = base_launch_sh.format(PYTHON_VENV, 'gradio_web_server', string_args(gradio_params), LOG_LLM_SERVER_DIR, log_name)
    gradio_check_sh = """while [ `grep -c "Running on" {0}/{1}.log` -eq '0' ];do
                            sleep 5s;
                            echo "wait {2} running"
                    done
                    echo '{2} running' """
    gradio_check_sh = gradio_check_sh.format(LOG_LLM_SERVER_DIR, log_name, "gradio_web_server", "gradio_web_server")
    subprocess.run(gradio_sh, shell=True, check=True)
    subprocess.run(gradio_check_sh, shell=True, check=True)


def launch_all(model_name, launch_gradio=False):
    """
    启动大模型服务器
    :param model_name:
    :param launch_gradio: 是否启动gradio_web_server
    :return:
    """

    '''
    启动controller组件
    '''
    controller_params = {
        'host': SERVER_WORKER_PARAMS['controller_host'],
        'port': SERVER_WORKER_PARAMS['controller_port']
    }
    print(f"Launching llm service,logs are located in {LOG_LLM_SERVER_DIR}...")
    print(f"开始启动LLM服务,请到{LOG_LLM_SERVER_DIR}下监控各模块日志...")
    controller_sh = base_launch_sh.format(PYTHON_VENV, 'controller', string_args(controller_params), LOG_LLM_SERVER_DIR, 'controller')
    controller_check_sh = base_check_sh.format(LOG_LLM_SERVER_DIR, 'controller', 'controller')
    subprocess.run(controller_sh, shell=True, check=True)
    subprocess.run(controller_check_sh, shell=True, check=True)
    print(f"worker启动时间视设备不同而不同，约需3-10分钟，请耐心等待...")

    '''
    启动
    暂时只支持一个大模型的启动
    '''
    launch_worker(0, model_name)

    '''
    启动openai_api_server
    '''
    server_params = {
        'host': SERVER_WORKER_PARAMS['server_host'],
        'port': SERVER_WORKER_PARAMS['server_port']
    }
    server_sh = base_launch_sh.format(PYTHON_VENV, 'openai_api_server', string_args(server_params), LOG_LLM_SERVER_DIR, 'openai_api_server')
    server_check_sh = base_check_sh.format(LOG_LLM_SERVER_DIR, "openai_api_server", "openai_api_server")
    subprocess.run(server_sh, shell=True, check=True)
    subprocess.run(server_check_sh, shell=True, check=True)

    '''
    启动gradio_web_server服务
    '''
    if launch_gradio:
        launch_gradio_sever()


def shutdown_server(serve):
    """
    关闭llm服务器
    :param serve: all, controller, model_worker, openai_api_server
    :return:
    """
    base_shell = "ps -eo user,pid,cmd|grep fastchat.serve{}|grep -v grep|awk '{{print $2}}'|xargs kill -9"
    if serve == 'all':
        shell_script = base_shell.format("")
    else:
        shell_script = base_shell.format(serve)

    subprocess.run(shell_script, shell=True, check=True)
    print(f"llm api sever --{serve} has been shutdown!")

