"""
Start a proxy and (potentially many, if data parallelism is enabled) API servers.
The proxy acts as a load balancer which uses round-robin to distribute the requests to the API servers.
"""
import os, sys
import subprocess
import argparse
import multiprocessing
from typing import Optional
from backends import BACKEND_TO_PORTS
import ray
# Per-backend parallelism configuration shared by every supported model.
# vllm / sglang / semi-pd: tensor-parallel world size (int).
# distserve / mixserve: (context_tp, context_pp, decoding_tp, decoding_pp).
_DEFAULT_PARALLEL_PARAMS = {
    "vllm": 2,
    "sglang": 2,
    "distserve": (1, 1, 1, 1),
    "mixserve": (1, 1, 1, 1),
    "semi-pd": 2,
}

# Map from local model path to its parallelism settings. Both models currently
# use the same configuration; each gets its own dict copy so one entry can be
# tuned later without affecting the other.
MODEL_TO_PARALLEL_PARAMS = {
    model_path: dict(_DEFAULT_PARALLEL_PARAMS)
    for model_path in (
        "/home/wsy/workspace/llm/model/Llama-2-7b-hf",
        "/home/wsy/workspace/llm/model/Qwen3-8B",
    )
}

def api_server_starter_routine(
    port: int,
    args: argparse.Namespace
):
    """
    Start the API server for ``args.backend`` on ``port``.

    For vllm, distserve and mixserve a Ray cluster must be available first;
    on those backends only the master node (``args.is_master == "true"``)
    launches an API server, while non-master nodes just participate in the
    Ray cluster and return without starting anything.

    Raises:
        ValueError: if ``args.backend`` is not one of the supported backends.
        RuntimeError: if the Ray cluster cannot be initialized.
    """
    use_dummy_weight = os.environ.get("USE_DUMMY_WEIGHT", "0") in ["1", "true", "True"]
    # `args.is_master` arrives as a *string* flag; normalize it once so every
    # branch interprets it identically. (A bare `if args.is_master:` would be
    # truthy even for the string "false".)
    is_master = str(args.is_master).lower() == "true"
    script: Optional[str] = None

    if args.backend == "vllm":
        # Explicit check instead of `assert ray.init(...)`: asserts are
        # stripped under `python -O`, which would silently skip the init.
        if not ray.init(_node_ip_address=args.master_addr):
            raise RuntimeError("ray cluster should be initialized")
        if is_master:
            tp_world_size = MODEL_TO_PARALLEL_PARAMS[args.model]["vllm"]
            script = f"""
            python -m vllm.entrypoints.api_server \\
            --host 0.0.0.0 --port {port} \\
            --disable-log-requests \\
            --model {args.model} --dtype half \\
            {"--load-format dummy" if use_dummy_weight else ""} \\
            -tp {tp_world_size} \\
            --block-size 16 --seed 0 \\
            --swap-space 16 \\
            --gpu-memory-utilization 0.95 \\
            --max-num-batched-tokens 16384 \\
            --max-num-seqs 1024 \\
                """

    elif args.backend == "distserve":
        context_tp, context_pp, decoding_tp, decoding_pp = MODEL_TO_PARALLEL_PARAMS[args.model]["distserve"]
        if not ray.init(_node_ip_address=args.master_addr):
            raise RuntimeError("ray cluster should be initialized")
        if is_master:
            script = f"""
        python -m distserve.api_server.distserve_api_server \\
        --host 0.0.0.0 \\
        --port {port} \\
        --model {args.model} \\
        --tokenizer {args.model} \\
        {"--use-dummy-weights" if use_dummy_weight else ""} \\
        \\
        --context-tensor-parallel-size {context_tp} \\
        --context-pipeline-parallel-size {context_pp} \\
        --decoding-tensor-parallel-size {decoding_tp} \\
        --decoding-pipeline-parallel-size {decoding_pp} \\
        \\
        --block-size 16 \\
        --max-num-blocks-per-req 128 \\
        --gpu-memory-utilization 0.95 \\
        --swap-space 16 \\
        \\
        --context-sched-policy fcfs \\
        --context-max-batch-size 128 \\
        --context-max-tokens-per-batch 8192 \\
        \\
        --decoding-sched-policy fcfs \\
        --decoding-max-batch-size 1024 \\
        --decoding-max-tokens-per-batch 65536
    """

    elif args.backend == "mixserve":
        context_tp, context_pp, decoding_tp, decoding_pp = MODEL_TO_PARALLEL_PARAMS[args.model]["mixserve"]
        # NOTE(review): unlike vllm/distserve, this branch expects Ray to have
        # been initialized already rather than initializing it itself.
        if not ray.is_initialized():
            raise RuntimeError("ray cluster should be initialized")
        if is_master:
            script = f"""
        python -m mixserve.api_server.mixserve_api_server \\
            --host 0.0.0.0 \\
            --port {port} \\
            --model {args.model} \\
            --tokenizer {args.model} \\
            {"--use-dummy-weights" if use_dummy_weight else ""} \\
            \\
            --context-tensor-parallel-size {context_tp} \\
            --context-pipeline-parallel-size {context_pp} \\
            --decoding-tensor-parallel-size {decoding_tp} \\
            --decoding-pipeline-parallel-size {decoding_pp} \\
            \\
            --block-size 16 \\
            --max-num-blocks-per-req 128 \\
            --gpu-memory-utilization 0.95 \\
            --swap-space 16 \\
            \\
            --context-sched-policy fcfs \\
            --context-max-batch-size 128 \\
            --context-max-tokens-per-batch 8192 \\
            \\
            --decoding-sched-policy fcfs \\
            --decoding-max-batch-size 1024 \\
            --decoding-max-tokens-per-batch 65536
        """

    elif args.backend == "semi-pd":
        os.environ["GLOO_SOCKET_IFNAME"] = args.gloo_socket_ifname
        tp_world_size = MODEL_TO_PARALLEL_PARAMS[args.model]["semi-pd"]
        script = f"""
            python -m sglang.launch_server \
        --model-path {args.model} \
        --host 0.0.0.0 --port {port} --trust-remote-code  --disable-radix-cache \
        --enable-semi-pd  --mem-fraction-static 0.85 --tp {tp_world_size}
    """

    elif args.backend == "sglang":
        os.environ["GLOO_SOCKET_IFNAME"] = args.gloo_socket_ifname
        tp_world_size = MODEL_TO_PARALLEL_PARAMS[args.model]["sglang"]
        # Use the CLI-provided model and assigned port; the previous version
        # overwrote this command with a hard-coded debug script (fixed model
        # path, fixed IP 10.156.186.2, fixed port 8200).
        script = f"""
            python -m sglang.launch_server --model-path {args.model} \\
                --dist-init-addr {args.master_addr}:{args.master_port} --nnodes {args.world_size} \\
                --node-rank {args.rank} --host 0.0.0.0 --port {port} --trust-remote-code \\
                --disable-radix-cache --mem-fraction-static 0.85 --tp {tp_world_size}
        """

    else:
        # Previously an unknown backend crashed with UnboundLocalError when
        # `script` was used below; fail with a clear error instead.
        raise ValueError(f"Unknown backend: {args.backend}")

    if script is None:
        # Ray-based backend on a non-master node: nothing to launch here.
        print(f"Node is not master; no API server started for backend {args.backend}.")
        return

    print(f"Starting server with command {script}")
    subprocess.run(["bash", "-c", script])


def metadata_server_process(port, args: argparse.Namespace):
    """
    Serve the launcher's argument namespace as JSON over HTTP.

    Every GET request on ``port`` receives ``args`` serialized to JSON.
    Per-request logging is suppressed. Blocks forever (run in a child
    process).
    """
    import json
    import http.server
    import socketserver

    class _ArgsMetadataHandler(http.server.SimpleHTTPRequestHandler):
        def do_GET(self):
            payload = json.dumps(vars(args)).encode()
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(payload)

        def log_message(self, format, *handler_args):
            # Silence the default per-request stderr logging.
            pass

    with socketserver.TCPServer(("", port), _ArgsMetadataHandler) as httpd:
        print("The metadata server is serving at port", port)
        httpd.serve_forever()
    
    
def main(args: argparse.Namespace):
    """
    Launch the metadata server (in a child process) and then the API server.

    The metadata server listens on the backend's base port + 1; the API
    server itself runs in this process on the base port.
    """
    print(args)
    base_port = BACKEND_TO_PORTS[args.backend]
    metadata_proc = multiprocessing.Process(
        target=metadata_server_process,
        args=(base_port + 1, args,)
    )
    metadata_proc.start()
    api_server_starter_routine(base_port, args)

def check_args(args):
    """
    Validate the CLI arguments for the selected backend.

    sglang / semi-pd need the full set of distributed-launch parameters;
    every other backend (vllm, distserve, mixserve) needs the master flag
    and the master address. Uses explicit raises instead of ``assert`` so
    validation still runs under ``python -O``.

    Raises:
        ValueError: when a required argument for the backend is missing.
    """
    if args.backend in ("sglang", "semi-pd"):
        required = ("master_addr", "master_port", "gloo_socket_ifname", "rank", "world_size")
        if any(getattr(args, name) is None for name in required):
            raise ValueError(
                "sglang series system should config master_addr,master_port,gloo_socket_ifname,rank,world_size"
            )
    elif args.is_master is None or args.master_addr is None:
        raise ValueError("vllm series system should clarify if the node is master")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--backend",
        type=str,
        required=True,
        help="The serving backend"
    )
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="The model to be served"
    )
    parser.add_argument(
        "--master_addr",
        type=str,
        required=False,
        default=None,
        help="the IP address of master"
        
    )
    parser.add_argument(
        "--master_port",
        type=str,
        required=False,
        default=None,
        help="the port of master"
    )
    parser.add_argument(
        "--gloo_socket_ifname",
        type=str,
        required=False,
        default=None,
        help="ifname of network interface card corresponding to the local ip address"
    )
    parser.add_argument(
        "--rank",
        type=str,
        required=False,
        default=None,
        help="rank of the node"
    )
    parser.add_argument(
        "--is_master",
        type=str,
        required=False,
        default=None,
        help="True if the node is master or not"
    )
    parser.add_argument(
        "--world_size",
        type=str,
        required=False,
        default=None,
        help="the number of all nodes"
    )
    args = parser.parse_args()
    check_args(args)
    main(args)
    