"""
Start a proxy and (potentially a lot of, if data parallelism is enabled) API servers.
The proxy acts as a load balancer which uses round-robin to distribute the requests to the API servers.
"""
import os, sys
import subprocess
import argparse
import multiprocessing

from backends import BACKEND_TO_PORTS

# Per-model parallelism settings, keyed first by model path, then by backend.
# Tuple layout depends on the backend (see how each branch of
# api_server_starter_routine unpacks it):
#   vllm / sglang / semi-pd: (tensor_parallel_size, pipeline_parallel_size)
#   distserve / mixserve:    (context_tp, context_pp, decoding_tp, decoding_pp)
MODEL_TO_PARALLEL_PARAMS = {
    "/data0/model/Qwen3-8B/": {
        "vllm": (4,2),
        "sglang": (4,2),
        "semi-pd":(4,2)
    },
    '/data0/model/Qwen3-8B/Qwen3_8B-bin/':{
        "distserve": (4, 1, 4, 1),
        "mixserve":(4,1,4,1),
    }
}

def api_server_starter_routine(
    port: int,
    args: argparse.Namespace
):
    """
    Build the launch command for the requested backend and start the API
    server on ``port``. Blocks until the server process exits.

    Args:
        port: TCP port the API server will listen on.
        args: Parsed CLI arguments. ``args.backend`` selects the serving
            engine (vllm / distserve / mixserve / semi-pd / sglang) and
            ``args.model`` must be a key of ``MODEL_TO_PARALLEL_PARAMS``.

    Raises:
        ValueError: If ``args.backend`` is not a supported backend.
    """
    # Dummy weights let the engine skip loading a real checkpoint
    # (useful for quick benchmarking runs).
    use_dummy_weight = os.environ.get("USE_DUMMY_WEIGHT", "0") in ["1", "true", "True"]
    if args.backend == "vllm":
        tp_world_size,pp_world_size = MODEL_TO_PARALLEL_PARAMS[args.model]["vllm"]
        script = f"""
python -u -m vllm.entrypoints.api_server \\
    --host 0.0.0.0 --port {port} \\
    --disable-log-requests \\
    --model {args.model} --dtype half \\
    {"--load-format dummy" if use_dummy_weight else ""} \\
    -tp {tp_world_size} \\
    -pp {pp_world_size}\\
    --block-size 16 --seed 0 \\
    --swap-space 16 \\
    --gpu-memory-utilization 0.95 \\
    --max-num-batched-tokens 16384 \\
    --max-num-seqs 1024 \\
    --enforce_eager\\
        """
    elif args.backend == "distserve":
        context_tp, context_pp, decoding_tp, decoding_pp = MODEL_TO_PARALLEL_PARAMS[args.model]["distserve"]
        script = f"""
python -m distserve.api_server.distserve_api_server \\
    --host 0.0.0.0 \\
    --port {port} \\
    --model {args.model} \\
    --tokenizer {args.model} \\
    {"--use-dummy-weights" if use_dummy_weight else ""} \\
    \\
    --context-tensor-parallel-size {context_tp} \\
    --context-pipeline-parallel-size {context_pp} \\
    --decoding-tensor-parallel-size {decoding_tp} \\
    --decoding-pipeline-parallel-size {decoding_pp} \\
    \\
    --block-size 16 \\
    --max-num-blocks-per-req 128 \\
    --gpu-memory-utilization 0.95 \\
    --swap-space 16 \\
    \\
    --context-sched-policy fcfs \\
    --context-max-batch-size 128 \\
    --context-max-tokens-per-batch 8192 \\
    \\
    --decoding-sched-policy fcfs \\
    --decoding-max-batch-size 1024 \\
    --decoding-max-tokens-per-batch 65536
"""
    elif args.backend == "mixserve":
        context_tp, context_pp, decoding_tp, decoding_pp = MODEL_TO_PARALLEL_PARAMS[args.model]["mixserve"]
        script = f"""
    python -m mixserve.api_server.mixserve_api_server \\
        --host 0.0.0.0 \\
        --port {port} \\
        --model {args.model} \\
        --tokenizer {args.model} \\
        {"--use-dummy-weights" if use_dummy_weight else ""} \\
        \\
        --context-tensor-parallel-size {context_tp} \\
        --context-pipeline-parallel-size {context_pp} \\
        --decoding-tensor-parallel-size {decoding_tp} \\
        --decoding-pipeline-parallel-size {decoding_pp} \\
        \\
        --block-size 16 \\
        --max-num-blocks-per-req 128 \\
        --gpu-memory-utilization 0.95 \\
        --swap-space 16 \\
        \\
        --context-sched-policy fcfs \\
        --context-max-batch-size 128 \\
        --context-max-tokens-per-batch 8192 \\
        \\
        --decoding-sched-policy fcfs \\
        --decoding-max-batch-size 1024 \\
        --decoding-max-tokens-per-batch 65536
    """
    elif args.backend == "semi-pd":
        tp_world_size,pp_world_size = MODEL_TO_PARALLEL_PARAMS[args.model]["semi-pd"]
        script = f"""
            python -m sglang.launch_server \
        --model-path {args.model} \
        --host 0.0.0.0 --port {port} --trust-remote-code  --disable-radix-cache \
        --enable-semi-pd  --mem-fraction-static 0.85 --tp {tp_world_size} --pp-size {pp_world_size} --disable-cuda-graph
    """
    elif args.backend =="sglang":
        tp_world_size,pp_world_size = MODEL_TO_PARALLEL_PARAMS[args.model]["sglang"]
        script = f"""
            python -m sglang.launch_server \
        --model-path {args.model} \
        --host 0.0.0.0 --port {port} --trust-remote-code  --disable-radix-cache \
        --mem-fraction-static 0.85 --tp {tp_world_size} --pp-size {pp_world_size} --disable-cuda-graph
        
        """
    else:
        # Fail fast with a clear message; without this branch `script` would
        # be unbound and the call below would crash with a confusing
        # UnboundLocalError.
        raise ValueError(f"Unsupported backend: {args.backend!r}")

    print(f"Starting server with command {script}")
    # Blocks for the lifetime of the API server process.
    subprocess.run(["bash", "-c", script])


def metadata_server_process(port, args: argparse.Namespace):
    """
    Serve the launcher's CLI arguments as JSON over HTTP.

    Every GET request (regardless of path) receives a 200 response whose
    body is the JSON-encoded ``args`` namespace. Runs forever; intended to
    be launched in a separate process.

    Args:
        port: TCP port the metadata server listens on.
        args: The parsed CLI arguments to expose.
    """
    import json
    import http.server
    import socketserver

    class MetadataServer(http.server.SimpleHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(json.dumps(vars(args)).encode())

        def log_message(self, format, *args):
            # Silence per-request access logging.
            pass

    class _ReusableTCPServer(socketserver.TCPServer):
        # Without SO_REUSEADDR, restarting the launcher shortly after a
        # shutdown fails with "Address already in use" while the old
        # socket lingers in TIME_WAIT.
        allow_reuse_address = True

    with _ReusableTCPServer(("", port), MetadataServer) as httpd:
        print("The metadata server is serving at port", port)
        httpd.serve_forever()
    
    
def main(args: argparse.Namespace):
    """
    Launch the metadata server in a child process, then run the API server
    (blocking) on the port assigned to the chosen backend.
    """
    print(args)
    base_port = BACKEND_TO_PORTS[args.backend]
    # Metadata server lives on base_port + 1, next to the API server.
    metadata_proc = multiprocessing.Process(
        target=metadata_server_process,
        args=(base_port + 1, args,)
    )
    metadata_proc.start()
    api_server_starter_routine(base_port, args)


if __name__ == "__main__":
    # CLI: both flags are mandatory.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--backend", type=str, required=True,
        help="The serving backend"
    )
    arg_parser.add_argument(
        "--model", type=str, required=True,
        help="The model to be served"
    )
    main(arg_parser.parse_args())
    