# Copyright (c) OpenMMLab. All rights reserved.
import sys
from lmdeploy.utils import get_max_batch_size
import os
import argparse
from lmdeploy.cli.utils import (ArgumentHelper)
from constants import VERSION
from lmdeploy.cli.utils import ArgumentHelper, DefaultsAndTypesHelpFormatter, convert_args, get_chat_template, get_lora_adapters


class LlmServer(object):
    """Holds the shared CLI parser for the LLM serving entry points.

    The argparse parser is assembled once at class-definition time and is
    consumed by ``run``/``api_server`` via ``LlmServer.parser``.
    """
    _help = 'Serve LLMs with openai API or triton server.'
    _desc = _help
    parser = argparse.ArgumentParser(prog='llm_server',
                                     description=_desc,
                                     add_help=True)
    # Required positional: local path or huggingface model id (see help text).
    parser.add_argument('model_path',
                    type=str,
                    help='The path of a model. it could be one of the following '
                    'options: - i) a local directory path of a turbomind model'
                    ' which is converted by `lmdeploy convert` command or '
                    'download from ii) and iii). - ii) the model_id of a '
                    'lmdeploy-quantized model hosted inside a model repo on '
                    'huggingface.co, such as "internlm/internlm-chat-20b-4bit",'
                    ' "lmdeploy/llama2-chat-70b-4bit", etc. - iii) the model_id'
                    ' of a model hosted inside a model repo on huggingface.co,'
                    ' such as "internlm/internlm-chat-7b", "qwen/qwen-7b-chat "'
                    ', "baichuan-inc/baichuan2-7b-chat" and so on')
    # HTTP server binding.
    parser.add_argument('--server-name', type=str, default='0.0.0.0', help='Host ip for serving')
    parser.add_argument('--server-port', type=int, default=23333, help='Server port')
    # CORS configuration; '*' allows everything by default.
    parser.add_argument('--allow-origins',
                        nargs='+',
                        type=str,
                        default=['*'],
                        help='A list of allowed origins for cors')
    parser.add_argument('--allow-credentials', action='store_true', help='Whether to allow credentials for cors')
    parser.add_argument('--allow-methods',
                        nargs='+',
                        type=str,
                        default=['*'],
                        help='A list of allowed http methods for cors')
    parser.add_argument('--allow-headers',
                        nargs='+',
                        type=str,
                        default=['*'],
                        help='A list of allowed http headers for cors')
    # Optional proxy-server registration.
    parser.add_argument('--proxy-url', type=str, default=None, help='The proxy url for api server.')
    parser.add_argument('--proxy-api-key', type=str, default=None, help='The api keys for proxy server.')
    parser.add_argument('--max-concurrent-requests',
                        type=int,
                        default=None,
                        help='This refers to the number of concurrent requests that '
                        'the server can handle. The server is designed to process the '
                        'engine’s tasks once the maximum number of concurrent requests is '
                        'reached, regardless of any additional requests sent by clients '
                        'concurrently during that time. Default to None.')
    # common args
    ArgumentHelper.backend(parser)
    ArgumentHelper.log_level(parser)
    ArgumentHelper.api_keys(parser)
    ArgumentHelper.ssl(parser)
    ArgumentHelper.model_name(parser)
    ArgumentHelper.max_log_len(parser)
    ArgumentHelper.disable_fastapi_docs(parser)

    # chat template args
    ArgumentHelper.chat_template(parser)

    # model args
    ArgumentHelper.revision(parser)
    ArgumentHelper.download_dir(parser)

    # pytorch engine args
    pt_group = parser.add_argument_group('PyTorch engine arguments')

    ArgumentHelper.adapters(pt_group)
    ArgumentHelper.device(pt_group)
    ArgumentHelper.eager_mode(pt_group)

    # common engine args: created under the PyTorch group, but the returned
    # actions are kept so they can also be listed under the TurboMind group.
    dtype_act = ArgumentHelper.dtype(pt_group)
    tp_act = ArgumentHelper.tp(pt_group)
    session_len_act = ArgumentHelper.session_len(pt_group)
    max_batch_size_act = ArgumentHelper.max_batch_size(pt_group)
    cache_max_entry_act = ArgumentHelper.cache_max_entry_count(pt_group)
    cache_block_seq_len_act = ArgumentHelper.cache_block_seq_len(pt_group)
    prefix_caching_act = ArgumentHelper.enable_prefix_caching(pt_group)
    max_prefill_token_num_act = ArgumentHelper.max_prefill_token_num(pt_group)
    quant_policy = ArgumentHelper.quant_policy(pt_group)
    # turbomind args
    tb_group = parser.add_argument_group('TurboMind engine arguments')
    # common engine args
    # HACK: appending to the private `_group_actions` list makes the shared
    # engine actions show up under the TurboMind section of --help as well,
    # without registering the options twice. Relies on argparse internals.
    tb_group._group_actions.append(dtype_act)
    tb_group._group_actions.append(tp_act)
    tb_group._group_actions.append(session_len_act)
    tb_group._group_actions.append(max_batch_size_act)
    tb_group._group_actions.append(cache_max_entry_act)
    tb_group._group_actions.append(cache_block_seq_len_act)
    tb_group._group_actions.append(prefix_caching_act)
    tb_group._group_actions.append(max_prefill_token_num_act)
    tb_group._group_actions.append(quant_policy)
    ArgumentHelper.model_format(tb_group)
    ArgumentHelper.rope_scaling_factor(tb_group)
    ArgumentHelper.num_tokens_per_iter(tb_group)
    ArgumentHelper.max_prefill_iters(tb_group)

    # vlm args
    vision_group = parser.add_argument_group('Vision model arguments')
    ArgumentHelper.vision_max_batch_size(vision_group)

def serve_from_yaml(config_file: str = None):
    """Start the model server from a YAML configuration file.

    Args:
        config_file: Path to a YAML file whose top-level mapping is passed
            to ``model_server.server`` as keyword arguments. When None,
            defaults to ``<this file's dir>/config/model.yaml``.

    Returns:
        None. Prints an error and returns early when the file is missing.
    """
    if config_file is None:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        config_file = os.path.join(current_dir, 'config', 'model.yaml')
    if not os.path.exists(config_file):
        # Emit the error on stderr so it is separated from normal output.
        print(f'ERROR:  config file: {config_file} not found!', file=sys.stderr)
        return
    # Imported lazily so PyYAML is only required for this entry point.
    import yaml
    with open(config_file, 'r') as f:
        config = yaml.safe_load(f)
    print(config)
    # Close the config file before starting the (long-running) server.
    from model_server import server
    server(**config)

def run():
    """The entry point of running LMDeploy CLI.

    Parses the CLI arguments with ``LlmServer.parser`` and hands the parsed
    namespace to ``model_server.server``. When a local ``model_path`` is
    given but does not exist, prints usage instead of starting the server.
    """
    args = LlmServer.parser.parse_args(sys.argv[1:])
    print(args)
    model_path = getattr(args, 'model_path', None)
    if model_path is not None and not os.path.exists(model_path):
        # Nonexistent local path: show usage and do not start the server.
        LlmServer.parser.print_help()
        return
    # Default the served model name to the last component of the path.
    if not getattr(args, 'model_name', None):
        args.model_name = model_path.split('/')[-1]
    # Imported lazily to keep CLI startup cheap.
    from model_server import server
    server(args)
def _default_model_name(model_path):
    """Last path component of *model_path*, used when --model-name is absent."""
    return model_path.split('/')[-1]


def _build_backend_config(args, backend, max_batch_size):
    """Create the engine config object matching the selected backend.

    Args:
        args: Parsed namespace from ``LlmServer.parser``.
        backend: Resolved backend name; 'pytorch' selects the PyTorch
            engine, anything else the TurboMind engine.
        max_batch_size: Effective max batch size (CLI value or device default).

    Returns:
        A ``PytorchEngineConfig`` or ``TurbomindEngineConfig`` instance.
    """
    if backend == 'pytorch':
        from lmdeploy.messages import PytorchEngineConfig
        adapters = get_lora_adapters(args.adapters)
        return PytorchEngineConfig(dtype=args.dtype,
                                   tp=args.tp,
                                   max_batch_size=max_batch_size,
                                   cache_max_entry_count=args.cache_max_entry_count,
                                   block_size=args.cache_block_seq_len,
                                   session_len=args.session_len,
                                   adapters=adapters,
                                   enable_prefix_caching=args.enable_prefix_caching,
                                   device_type=args.device,
                                   quant_policy=args.quant_policy,
                                   eager_mode=args.eager_mode,
                                   max_prefill_token_num=args.max_prefill_token_num)
    from lmdeploy.messages import TurbomindEngineConfig
    return TurbomindEngineConfig(dtype=args.dtype,
                                 tp=args.tp,
                                 max_batch_size=max_batch_size,
                                 session_len=args.session_len,
                                 model_format=args.model_format,
                                 quant_policy=args.quant_policy,
                                 rope_scaling_factor=args.rope_scaling_factor,
                                 cache_max_entry_count=args.cache_max_entry_count,
                                 cache_block_seq_len=args.cache_block_seq_len,
                                 enable_prefix_caching=args.enable_prefix_caching,
                                 max_prefill_token_num=args.max_prefill_token_num)


def api_server():
    """Serve LLMs with restful api using fastapi."""
    parser = LlmServer.parser
    args = parser.parse_args()
    print(args)
    model_path = getattr(args, 'model_path', None)
    if model_path is not None and not os.path.exists(model_path):
        # Nonexistent local path: show usage and exit with an error status.
        parser.print_help()
        sys.exit(1)
    # Default the served model name to the last component of the path.
    if not getattr(args, 'model_name', None):
        args.model_name = _default_model_name(model_path)
    from lmdeploy.archs import autoget_backend

    # Explicit CLI value wins; otherwise derive a device-appropriate default.
    max_batch_size = args.max_batch_size if args.max_batch_size \
        else get_max_batch_size(args.device)
    backend = args.backend
    if backend != 'pytorch':
        # set auto backend mode
        backend = autoget_backend(args.model_path)

    backend_config = _build_backend_config(args, backend, max_batch_size)
    chat_template_config = get_chat_template(args.chat_template)

    from lmdeploy.messages import VisionConfig
    vision_config = VisionConfig(args.vision_max_batch_size)
    from model_server import server
    server(args.model_path,
           model_name=args.model_name,
           backend=backend,
           backend_config=backend_config,
           chat_template_config=chat_template_config,
           vision_config=vision_config,
           server_name=args.server_name,
           server_port=args.server_port,
           allow_origins=args.allow_origins,
           allow_credentials=args.allow_credentials,
           allow_methods=args.allow_methods,
           allow_headers=args.allow_headers,
           log_level=args.log_level.upper(),
           api_keys=args.api_keys,
           ssl=args.ssl,
           proxy_api_key=args.proxy_api_key,
           proxy_url=args.proxy_url,
           max_log_len=args.max_log_len,
           disable_fastapi_docs=args.disable_fastapi_docs,
           max_concurrent_requests=args.max_concurrent_requests)

# Script entry point: start the OpenAI-compatible API server.
if __name__ == '__main__':
    api_server()

