import subprocess
import os
import time
import utils
import torch
# HuggingFace model ID served by the vLLM OpenAI-compatible API server.
model_name = "hf-models/glm-4-9b-chat"

# Allow slow networks: raise the HF Hub download timeout to 120 seconds.
os.environ["HF_HUB_DOWNLOAD_TIMEOUT"] = "120"

# Cache model weights under /data instead of the default ~/.cache/huggingface.
os.environ['HF_HOME'] = '/data'

# Command line for the vLLM OpenAI-compatible server (launched later via
# subprocess.Popen).  Note every value must be a string, hence str(...) below.
api_server_command = [
    "python",
    "-m",
    "vllm.entrypoints.openai.api_server",
    "--model",
    model_name,
    "--dtype",
    "float16",
    # Empty API key: the endpoint accepts unauthenticated requests.
    "--api-key",
    "",
    # Shard across all visible GPUs; fall back to 1 when none are detected
    # (device_count() == 0 is falsy, so `or 1` kicks in).
    "--tensor-parallel-size",
    str(torch.cuda.device_count() or 1),
    # Required for models (like GLM) that ship custom modeling code.
    "--trust-remote-code",
    # Leave 25% of GPU memory headroom for other processes.
    "--gpu-memory-utilization",
    "0.75",
    "--max-num-batched-tokens",
    "45000",
    "--max-model-len",
    "45000",
    # NOTE(review): --max-context-len-to-capture is deprecated/removed in
    # newer vLLM releases — confirm against the pinned vLLM version.
    "--max-context-len-to-capture",
    "45000",
    # Keep stdout quiet: suppress per-request and periodic stats logging.
    "--disable-log-requests",
    "--disable-log-stats",
    "--port",
    "8000",
]


def wait_for_service(url, timeout=None, poll_interval=10):
    """Block until the service at *url* starts accepting connections.

    Probes the endpoint via ``utils.is_port_open`` once per poll cycle.

    Args:
        url: Address handed to ``utils.is_port_open``.
        timeout: Maximum seconds to wait; ``None`` (the default) waits
            forever, matching the original behavior.
        poll_interval: Seconds to sleep between probes (was hard-coded 10).

    Returns:
        True once the port is open; False if *timeout* elapsed first.
    """
    deadline = None if timeout is None else time.monotonic() + timeout
    while True:
        try:
            if utils.is_port_open(url):
                return True
        except Exception:
            # Probe itself failed — service not up yet; retry below.
            # (Original used a bare `except:`, which also swallowed
            # KeyboardInterrupt and made Ctrl-C ineffective here.)
            pass
        if deadline is not None and time.monotonic() >= deadline:
            return False
        # Original bug: sleep only ran in the except path, so a probe that
        # returned False without raising caused a hot busy-wait loop.
        time.sleep(poll_interval)


# Launch the Chainlit UI first; once its port answers, start the vLLM API
# server. Both run as child processes of this launcher.
chainlit_ui_process = subprocess.Popen(
    ['python', '-m', 'chainlit', 'run', 'chainlit_ui_crawler.py', '--host', '0.0.0.0', '--port', '7860', "--headless"])

# Bind up front so the finally-block below cannot hit a NameError when the
# API server was never started (original left api_process unbound in that case).
api_process = None

if wait_for_service("http://127.0.0.1:7860"):
    api_process = subprocess.Popen(
        api_server_command, text=True)
    print("Chainlit UI 服务已启动，开始启动 API 服务...")


try:
    # Block until the children exit on their own (normally: forever, until
    # the launcher itself is interrupted).
    if api_process is not None:
        api_process.wait()
    chainlit_ui_process.wait()
finally:
    # Use terminate() (SIGTERM) rather than kill() (SIGKILL) so the servers
    # get a chance to release GPUs/ports cleanly on shutdown.
    if api_process is not None:
        api_process.terminate()
    chainlit_ui_process.terminate()
    print("Servers shut down.")
