import redis
import subprocess
import concurrent.futures
import time
import logging
import socket
from multiprocessing import Pool, cpu_count
import psutil
import configparser

# Logging configuration: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Read settings from config.ini (expects [redis] and [multiprocessing] sections).
config = configparser.ConfigParser()
config.read('config.ini')

# Redis connection settings (values come back from ConfigParser as strings).
redis_host = config['redis']['host']
redis_port = config['redis']['port']
redis_db = config['redis']['db']
redis_password = config['redis']['password']  # Redis AUTH password
pool_size = int(config['multiprocessing']['pool_size'])

r = redis.Redis(host=redis_host, port=int(redis_port), db=int(redis_db), password=redis_password)  # authenticated client

# This host's name; tasks carry a machine_id field matched against it.
current_machine = socket.gethostname()

def execute_task(task_id):
    """Run the shell command stored in the Redis hash *task_id* on this host.

    Reads the hash, skips it (leaving the hash untouched) if its
    ``machine_id`` field does not match ``current_machine``, otherwise
    executes its ``command`` field and writes ``status``/``result`` back
    into the same hash.
    """
    task_data = r.hgetall(task_id)
    # Redis returns bytes for both keys and values; decode to str once here.
    task = {k.decode(): v.decode() for k, v in task_data.items()}
    command = task['command']
    assigned_machine = task['machine_id']

    if current_machine != assigned_machine:
        logger.info(f"Task {task_id} assigned to {assigned_machine}, but current machine is {current_machine}. Skipping.")
        return

    logger.info(f"Starting task {task_id} with command: {command}")
    try:
        # SECURITY: shell=True executes an arbitrary string read from Redis.
        # Anyone who can write the task hash can run commands on this host —
        # acceptable only if Redis access is fully trusted.
        result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
        task['status'] = 'completed'
        task['result'] = result.decode()
        logger.info(f"Task {task_id} completed successfully.")
    except subprocess.CalledProcessError as e:
        task['status'] = 'failed'
        task['result'] = e.output.decode()
        logger.error(f"Task {task_id} failed with error: {e.output.decode()}")

    # hmset() is deprecated and removed in redis-py 4.x; hset() with
    # mapping= writes all fields in a single round trip.
    r.hset(task_id, mapping=task)

def worker():
    """Consume task ids from the Redis list 'task_queue' forever.

    Task ids assigned to this host are handed to a thread pool for
    execution; ids assigned to other hosts are pushed back onto the queue.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=pool_size) as executor:
        while True:
            # Timeout 0 means block until a task id is available.
            task_id = r.brpop('task_queue', 0)[1].decode()

            task_data = r.hgetall(task_id)
            assigned_machine = task_data.get(b'machine_id', b'').decode()

            if assigned_machine == current_machine:
                logger.info(f"Retrieved task {task_id} from queue")
                # Hand off to the pool so the loop keeps draining the queue.
                executor.submit(execute_task, task_id)
            else:
                # Not ours: requeue and back off briefly so workers don't
                # spin pop/push-ing the same foreign task at full speed.
                # (The original slept after EVERY pop, throttling local
                # dispatch to one task per 5 s for no reason.)
                r.lpush('task_queue', task_id)
                time.sleep(5)
            

def monitor_resources_and_adjust_pool(pool):
    """Every 10 s, nudge *pool*'s target worker count based on CPU load.

    Shrinks toward 1 when CPU is above 80 %, grows toward cpu_count()
    when below 50 %.

    NOTE(review): writing the private ``pool._processes`` attribute does not
    resize an already-running multiprocessing.Pool — the worker processes
    are spawned at construction. This looks best-effort at most; confirm
    the intended mechanism with the author.
    """
    while True:
        # Sample once per iteration: the original called cpu_percent()
        # twice, and the two readings could straddle both thresholds
        # inconsistently.
        cpu = psutil.cpu_percent()
        if cpu > 80:
            pool._processes = max(1, pool._processes - 1)
        elif cpu < 50:
            pool._processes = min(cpu_count(), pool._processes + 1)
        time.sleep(10)

if __name__ == '__main__':
    # BUG FIX: the original exited the `with Pool(1)` block immediately —
    # which terminates the pool — before worker() ever ran. Worse,
    # multiprocessing.Pool objects cannot be pickled, so
    # pool.apply_async(monitor..., (pool,)) silently failed and the monitor
    # never executed. Run the monitor in a daemon thread (same process, so
    # it can see the pool object) and keep the pool alive around worker().
    with Pool(1) as pool:
        monitor = threading.Thread(
            target=monitor_resources_and_adjust_pool,
            args=(pool,),
            daemon=True,  # dies with the main thread; worker() never returns
        )
        monitor.start()
        worker()