import redis
import os
import time
import threading
from multiprocessing import Process
import sys
sys.path.append("../")
from Dataset.pull_data_from_sql_class import pullDataFromSql

# Redis connection configuration.
# NOTE(review): credentials are hard-coded in source; consider loading host,
# port, and password from environment variables or a config file instead.
redis_client = redis.Redis(host='10.240.1.3', port=13296, password="white&is329tygpq26", db=2)

# GPU resource management.
GPU_COUNT = 2  # Assumed number of GPU cards; adjust to the actual count.
available_gpus = list(range(GPU_COUNT))  # GPU ids currently free.
# NOTE(review): this lock only guards *threads* inside one process. The
# workers launched in main() are separate processes, each of which inherits
# its own private copy of available_gpus, so this lock does not coordinate
# GPU ownership between workers — confirm intent.
gpu_lock = threading.Lock()  # Lock for thread-safe GPU allocation.

# Cross-process GPU allocation.
#
# BUG FIX: the workers started in main() are separate *processes*
# (multiprocessing.Process), so a module-level list guarded by a
# threading.Lock is copied into each child and cannot coordinate them —
# every worker saw its own private pool and always claimed GPU 0 while
# GPU 1 sat idle. The pool therefore now lives in Redis, which all worker
# processes share, making claim/release race-free across processes.
_GPU_POOL_KEY = 'gpu_pool'
_GPU_POOL_INIT_KEY = 'gpu_pool_init'

def _ensure_gpu_pool():
    """Seed the shared GPU pool in Redis exactly once.

    SETNX is atomic, so even if several workers race here only the first
    caller pushes ids 0..GPU_COUNT-1 into the pool.

    NOTE(review): these keys persist in Redis between runs; if a previous
    run died while holding a GPU, clear both keys before restarting.
    """
    if redis_client.setnx(_GPU_POOL_INIT_KEY, 1):
        redis_client.rpush(_GPU_POOL_KEY, *range(GPU_COUNT))

def get_available_gpu():
    """Claim an available GPU id; return None if all GPUs are busy."""
    _ensure_gpu_pool()
    raw = redis_client.lpop(_GPU_POOL_KEY)
    # lpop returns bytes, or None when the pool is empty.
    return int(raw) if raw is not None else None

def release_gpu(gpu_id):
    """Return gpu_id to the shared pool so another process can claim it."""
    redis_client.rpush(_GPU_POOL_KEY, gpu_id)

def worker():
    """Worker process: consume group names from the Redis task queue forever.

    Each task is bound to one GPU for its duration; the GPU is released
    back to the pool whether the task succeeds or fails.
    """
    while True:
        # Block until a task arrives (timeout=0 means wait indefinitely).
        _, payload = redis_client.blpop('task_queue', timeout=0)
        group_name = payload.decode('utf-8')

        # Poll until a GPU can be claimed.
        gpu_id = get_available_gpu()
        while gpu_id is None:
            time.sleep(1)  # All GPUs busy — retry after one second.
            gpu_id = get_available_gpu()

        # Restrict CUDA work in this process to the claimed card.
        # NOTE(review): this only affects CUDA contexts created *after* this
        # point — confirm pullDataFromSql has not already initialized CUDA.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

        try:
            # Per-group task configuration.
            json_file = f'/opt/wyh/LSP_book_rec/configs/{group_name}.json'
            db_config_path = os.path.join(
                "/opt/wyh/LSP_book_rec/global_configs", "mysql_db_config.json"
            )

            # Run the data pull for this group.
            puller = pullDataFromSql(
                group_name=group_name,
                config_path=json_file,
                db_config_path=db_config_path,
            )
            puller.update_local_database()
            print(f"Task for {group_name} completed successfully on GPU {gpu_id}")

        except Exception as e:
            print(f"Error processing {group_name} on GPU {gpu_id}: {e}")

        finally:
            # Always hand the GPU back, success or failure.
            release_gpu(gpu_id)

def main():
    """Enqueue the configured task groups, then run one worker per GPU."""
    # Task groups to process; edit this list as needed.
    group_names = ["LUIBE", "JXSTNU"]

    # Push every group name onto the shared Redis queue.
    for group_name in group_names:
        redis_client.rpush('task_queue', group_name)
        print(f"Task for {group_name} added to queue")

    # Launch one worker process per GPU card.
    worker_processes = [Process(target=worker) for _ in range(GPU_COUNT)]
    for proc in worker_processes:
        proc.start()

    # Block until the workers exit.
    # NOTE(review): worker() loops forever, so these joins never return —
    # confirm whether blocking here indefinitely is intended.
    for proc in worker_processes:
        proc.join()

# Script entry point: enqueue tasks and launch the worker pool.
if __name__ == "__main__":
    main()