# reminder: 49-deqa-/root/xjh/gen_eval - GPU service | 49-reward1-/root/deqa/reward-server - CPU service

# run
# nohup gunicorn "app_geneval:create_app()" --timeout 480 > cpu_0730_geneval_gunicorn.log 2>&1 &

# kill
# ps -ef | grep 'gunicorn app_geneval:create_app' | grep -v grep | awk '{print $2}' | xargs kill -9

import os

# Number of physical GPUs available on this host.
NUM_DEVICES = 4
# GPU ids currently reserved by live workers (master-process bookkeeping;
# gunicorn hooks below run in the master, so a plain set is sufficient).
USED_DEVICES = set()

# Expose every GPU to the master process; post_fork later narrows each
# forked child down to its single assigned device.
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(NUM_DEVICES)))

def pre_fork(server, worker):
    """Gunicorn pre_fork hook (runs in the master): reserve a GPU for the
    worker that is about to be forked.

    Picks the lowest free id in [0, NUM_DEVICES) and records it in
    USED_DEVICES. If every id is taken (e.g. a replacement worker is
    forked before child_exit released the dead worker's id), falls back
    to round-robin on worker.age so worker.device_id is ALWAYS set —
    the original code left it unset in that case, which made post_fork
    and child_exit raise AttributeError.
    """
    global USED_DEVICES
    for gpu_id in range(NUM_DEVICES):
        if gpu_id not in USED_DEVICES:
            worker.device_id = gpu_id
            USED_DEVICES.add(gpu_id)
            return
    # Exhaustion fallback: share a GPU rather than crash the worker.
    worker.device_id = worker.age % NUM_DEVICES
    server.log.warning(
        "All %d GPUs already reserved; reusing GPU %d for new worker",
        NUM_DEVICES, worker.device_id,
    )

def post_fork(server, worker):
    """Gunicorn post_fork hook (runs in the freshly forked child): pin
    this worker to the GPU reserved for it by pre_fork.

    Restricting CUDA_VISIBLE_DEVICES to a single id means the only CUDA
    device torch can see is index 0, hence set_device(0).
    """
    # getattr guard: pre_fork should always set device_id, but default to
    # GPU 0 instead of crashing the worker at boot if it did not.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(getattr(worker, "device_id", 0))
    import torch
    # Guard: on a CPU-only host (or if CUDA init fails) the original
    # unconditional set_device(0) raised and killed the worker.
    if torch.cuda.is_available():
        torch.cuda.set_device(0)  # env var limits visibility, so id 0 is the only device

def child_exit(server, worker):
    """Gunicorn child_exit hook (runs in the master after a worker dies):
    release the dead worker's GPU id so a replacement worker can claim it.
    """
    global USED_DEVICES
    # getattr guard: if pre_fork never assigned a device the original
    # code raised AttributeError here; discard(None) is a harmless no-op.
    USED_DEVICES.discard(getattr(worker, "device_id", None))

# Interface/port the gunicorn master binds to.
bind = "0.0.0.0:18089"
# bind = "0.0.0.0:18080" # For debug
# One sync worker per GPU so each worker owns exactly one device.
workers = NUM_DEVICES
worker_class = "sync"
# Per-request timeout in seconds.
# NOTE(review): the nohup launch command in the file header passes
# --timeout 480, which overrides this setting — confirm which is intended.
timeout = 120


