from celery import Celery
from urllib.parse import quote
from kombu.serialization import register
import time
from task_flow.dag_util import DAG
from task_flow.serialization import xdumps, xloads
from logging import getLogger
from typing import Any, Dict

logger = getLogger(__name__)

__all__ = ['redis_celery_app',"send_chain_tasks"]


def redis_celery_app(host, port, passwd, name='model_chain',
                     broker_db=7, result_db=8) -> Celery:
    """
    Build a Celery application that uses Redis for both the broker and the
    result backend, with a custom zstd-compressed pickle serializer.

    :param host: Redis host.
    :param port: Redis port.
    :param passwd: Redis password; URL-quoted so special characters are safe
        inside the connection URL.
    :param name: Celery application name, defaults to 'model_chain'.
    :param broker_db: Redis database index for the broker (default 7,
        matching the previous hard-coded value).
    :param result_db: Redis database index for the result backend (default 8,
        matching the previous hard-coded value).
    :return: the configured :class:`Celery` application.
    """
    app = Celery(name, broker=f"redis://:{quote(passwd)}@{host}:{port}/{broker_db}")
    app.conf.update(
        worker_hijack_root_logger=False,  # keep Celery from hijacking the root logger
        result_backend=f"redis://:{quote(passwd)}@{host}:{port}/{result_db}",
        result_expires=60 * 15  # task results expire after 15 minutes
    )
    # Fully disable Celery's own logging setup so the host application keeps
    # control of logging configuration.
    app.log.setup_logging_subsystem = lambda *args, **kwargs: None

    # Register the project's zstd-compressed pickle serializer with kombu.
    register(
        'mypickle_zstd',  # serializer name
        xdumps,
        xloads,
        content_type='application/x-pickle-zstd',  # accurately describes the format
        content_encoding='binary'  # the output is raw bytes
    )

    app.conf.task_serializer = 'mypickle_zstd'
    app.conf.result_serializer = 'mypickle_zstd'
    app.conf.accept_content = ['mypickle_zstd']

    return app


def send_chain_tasks(celery_app: Celery, dag_conf: Dict, node_name: str, order_id: str,
                     is_backtrack: bool = False, result_timeout: float = 20) -> Dict:
    """
    Resolve the DAG configuration into layered task batches and dispatch each
    layer's tasks to Celery, collecting results layer by layer.

    Note: each node's "func" entry must map strictly to a Celery task whose
    name AND queue are both that same name.

    :param celery_app: Celery application used to send the tasks.
    :param dag_conf: DAG configuration; each node provides "deps" and "func".
    :param node_name: target node whose dependency chain will be executed.
    :param order_id: order identifier propagated to every sub-task.
    :param is_backtrack: when True, send an empty per-task config so the worker
        falls back to its default behavior (compute from the order_id cache
        instead of the extra information in base_info).
    :param result_timeout: seconds to wait for each sub-task result
        (default 20, preserving the previous hard-coded value).
    :return: mapping of task name -> task result, plus the initial "base_info".
    :raises ValueError: when any sub-task returns a status code other than 200.
    """
    logger.info(f"node_name:{node_name} 接受到订单 order_id:{order_id},is_backtrack:{is_backtrack} ")
    req_results = {"base_info": {"order_id": order_id}}
    dag = DAG(dag_conf)
    task_chain = dag.task_chain({node_name})
    logger.info(f"node_name:{node_name} 完成任务规划,order_id:{order_id},{task_chain}")

    t_start = time.time()
    for layer_idx, task_names in enumerate(task_chain):
        part_results = {}  # async results for the current layer (typo fixed: was part_resutls)
        t_layer_start = time.time()
        logger.info(
            f"node_name:{node_name} 子任务开始,order_id:{order_id},【第 {layer_idx + 1} 层】: {task_names} 开始")
        for task_name in task_names:
            node_info = dag_conf[task_name]
            deps = set(node_info["deps"] + ['base_info'])
            func_name = node_info["func"]
            req_json = {k: req_results[k] for k in deps}
            # Backtrack runs send an empty node config so the worker uses its
            # default (order_id-cache based) computation; otherwise the node's
            # own config is forwarded. Single dispatch path replaces the two
            # duplicated send_task branches.
            node_conf = {} if is_backtrack else node_info
            part_results[task_name] = celery_app.send_task(
                func_name, args=[req_json, {task_name: node_conf}], queue=func_name)
        for k, v in part_results.items():
            r_dict = v.get(timeout=result_timeout)
            state_code = r_dict['code']
            if state_code == 200:
                req_results[k] = r_dict['data']
            else:
                raise ValueError(
                    f"node_name:{node_name} 子任务失败,order_id:{order_id},【第 {layer_idx + 1} 层】子任务:{k},计算失败{state_code},报错如下:\n{r_dict['msg']}")
        t_layer_end = time.time()
        logger.info(
            f"node_name:{node_name} 子任务结束,order_id:{order_id},【第 {layer_idx + 1} 层】: {task_names} 完成，耗时: {t_layer_end - t_layer_start:.4f}s")

    t_end = time.time()
    logger.info(f"node_name:{node_name} 完成计算,order_id:{order_id},耗时:{round(t_end - t_start, 4)}")
    return req_results