# scripts/start_services.py
import yaml
import subprocess
import os
from pathlib import Path

def start_vllm(model_config):
    """Launch a vLLM server for one model; write its log and PID files.

    Required ``model_config`` keys: ``path``, ``port``, ``task``, ``name``,
    ``gpu_id``. Optional keys ``max_model_len`` and
    ``max_num_batched_tokens`` override the context-size limits
    (both default to 32768, matching the previous hard-coded values —
    small models such as 0.6B can afford a long sequence length).
    """
    # Context-size limits are now per-model configurable; defaults preserve
    # the original behavior. The two values should be adjusted together.
    max_model_len = str(model_config.get("max_model_len", 32768))
    max_batched_tokens = str(model_config.get("max_num_batched_tokens", 32768))
    cmd = [
        "vllm", "serve", model_config["path"],
        "--port", str(model_config["port"]),
        "--task", model_config["task"],
        "--served-model-name", model_config["name"],
        "--trust-remote-code",
        "--max-model-len", max_model_len,
        "--max-num-batched-tokens", max_batched_tokens,
    ]
    env = os.environ.copy()
    # Pin the server process to the GPU assigned in the config.
    env["CUDA_VISIBLE_DEVICES"] = str(model_config["gpu_id"])

    # Log directory: one log file per served model.
    log_dir = Path("runtime/logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / f"{model_config['name']}.log"

    # Start the server; stderr is merged into the same log file. The child
    # inherits a duplicate of the fd, so closing our handle right after
    # Popen is safe — no need to keep the PID bookkeeping inside the block.
    with open(log_file, "w") as f:
        proc = subprocess.Popen(cmd, env=env, stdout=f, stderr=subprocess.STDOUT)
    print(f"Started {model_config['name']} on port {model_config['port']}, PID={proc.pid}")

    # PID directory (auto-created) so a stop script can locate the process.
    pid_dir = Path("runtime/pids")
    pid_dir.mkdir(parents=True, exist_ok=True)
    pid_file = pid_dir / f"{model_config['name']}.pid"
    pid_file.write_text(str(proc.pid))

# Script entry point: load the model list from the YAML config and start
# one vLLM server per configured model.
if __name__ == "__main__":
    with Path("config/models.yaml").open("r") as cfg_file:
        models = yaml.safe_load(cfg_file)["models"]
    for cfg in models:
        start_vllm(cfg)