# Bootstrap: configure the Django settings/path BEFORE any Django model
# import below -- `async_task.models` requires a configured Django
# environment, so the import order in this file is load-bearing.
from __base__ import set_django_path

set_django_path()

# Keep these imports below set_django_path(); moving them above it breaks startup.
from async_task.models import Job, Task, TaskDepend, JobStatus, TaskStatus
from lucommon.logger import lu_logger
from typing import List
import time
import copy
import pickle
import pika
import conf


def execute_task(channel: "pika.adapters.blocking_connection.BlockingChannel", task: Task):
    """Publish *task* onto the worker queue and mark it as executing.

    The task object is serialized with pickle and published to the
    default exchange with routing key "brood2", delivery_mode=2
    (persistent message).  After publishing, the task row is updated to
    EXECUTING and saved.

    NOTE(review): pickle round-trips require both producer and consumer
    to fully trust each other -- unpickling untrusted data executes
    arbitrary code.  Confirm the consumer side is trusted.

    :param channel: open pika blocking channel to publish on
    :param task: task model instance to dispatch
    """
    # Original annotation was `pika.BlockingConnection.channel` -- that is the
    # factory *method*, not a type; the actual channel class is
    # pika.adapters.blocking_connection.BlockingChannel (string-annotated so
    # it is not evaluated at def time).
    channel.basic_publish(exchange="", routing_key="brood2", body=pickle.dumps(task),
                          properties=pika.BasicProperties(delivery_mode=2))
    lu_logger.info("send task {}".format(task.id))
    # Flip status only after a successful publish so a publish failure
    # leaves the task PENDING for the next scheduler pass.
    task.status = TaskStatus.EXECUTING
    task.save()


def execute_job(channel: "pika.adapters.blocking_connection.BlockingChannel", job: Job):
    """Schedule the tasks of *job* layer by layer and refresh the job status.

    Builds the dependency graph of the job's tasks, topologically sorts
    it into execution layers (tasks with no unmet dependencies first),
    publishes every still-PENDING task of the current layer, and finally
    derives the job status from the aggregate of the task statuses.
    If the dependency graph contains a cycle, the job is marked FAIL.

    :param channel: open pika blocking channel used to publish tasks
    :param job: job model instance to (re)schedule
    """
    tasks = Task.objects.filter(job_id=job.id)
    task_map = {task.id: task for task in tasks}

    task_depends = TaskDepend.objects.filter(task_id__in=[task.id for task in tasks])
    # Dependency map: task id -> set of task ids it depends on (its in-edges).
    task_depend_graph = {task.id: set() for task in tasks}
    for depend in task_depends:
        task_depend_graph[depend.task_id].add(depend.depend_task_id)

    # Topological sort into layers (Kahn-style), on a deep copy so the
    # original graph stays intact.
    task_execute_layer: List[List[Task]] = []
    task_depend_graph_copy = copy.deepcopy(task_depend_graph)
    while task_depend_graph_copy:
        current_layer_ids = []
        for task_id, depend_task_id_set in task_depend_graph_copy.items():
            if not depend_task_id_set:
                # In-degree 0 means no outstanding dependency; add to the current layer.
                current_layer_ids.append(task_id)
        for task_id in current_layer_ids:
            # After selecting the layer, remove its nodes and edges from the graph.
            task_depend_graph_copy.pop(task_id)
            for depend_set in task_depend_graph_copy.values():
                if task_id in depend_set:
                    depend_set.remove(task_id)
        if not current_layer_ids:
            # No remaining node has in-degree 0 -> the leftover tasks form a cycle.
            lu_logger.error("job{}下的task存在循环依赖".format(job.id))
            job.status = JobStatus.FAIL
            job.save()
            return

        task_execute_layer.append([task_map[task_id] for task_id in current_layer_ids])

    for layer in task_execute_layer:
        layer_status = set()
        for task in layer:
            # Status is recorded BEFORE execute_task() flips PENDING tasks to
            # EXECUTING, so a freshly dispatched layer never counts as SUCCESS.
            layer_status.add(task.status)
            if task.status == TaskStatus.PENDING:
                execute_task(channel, task)

        if layer_status != {TaskStatus.SUCCESS}:
            # Only move on to the next layer once every task in this one succeeded.
            break

    # Derive the job status from the task statuses.  The queryset `tasks` is
    # already evaluated (cached), so the in-memory status changes made by
    # execute_task() above are visible here.
    # NOTE(review): a job with zero tasks yields an empty status set and is
    # kept PENDING forever by the first branch -- confirm empty jobs cannot occur.
    task_status_set = {task.status for task in tasks}
    if task_status_set - {TaskStatus.PENDING} == set():
        # Every task is still pending.
        job.status = JobStatus.PENDING
    elif TaskStatus.PENDING in task_status_set or TaskStatus.EXECUTING in task_status_set:
        job.status = JobStatus.EXECUTING
    elif task_status_set - {TaskStatus.SUCCESS} == set():
        # Every task succeeded.
        job.status = JobStatus.SUCCESS
    else:
        job.status = JobStatus.FAIL

    job.save()


def daemon():
    """Main scheduler loop: poll active jobs and dispatch their tasks.

    Opens a blocking RabbitMQ connection, declares the work queue, then
    every 5 seconds publishes an empty message to 'heartbeat_queue'
    (presumably a liveness signal -- confirm against the consumer) and
    walks all PENDING/EXECUTING jobs, delegating each to execute_job().

    The connection is closed in a ``finally`` so the broker socket is
    not leaked when the loop exits on an unhandled exception or
    KeyboardInterrupt (previously the connection was never closed).
    """
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(
            host=conf.RABBITMQ_HOST,
            port=conf.RABBITMQ_PORT,
            virtual_host=conf.RABBITMQ_VHOST,
            credentials=pika.PlainCredentials(conf.RABBITMQ_USER, conf.RABBITMQ_PASSWORD)
        ))
    try:
        channel = connection.channel()
        # NOTE(review): tasks are published with delivery_mode=2 (persistent)
        # but the queue is not declared durable=True; persistent messages on a
        # non-durable queue do not survive a broker restart.  Changing it here
        # would raise PRECONDITION_FAILED on an existing queue -- confirm the
        # intended durability before touching this.
        channel.queue_declare(queue=conf.RABBITMQ_QUEUE)

        while True:
            channel.basic_publish(exchange='', routing_key='heartbeat_queue', body=b'')
            jobs = Job.objects.filter(status__in=[JobStatus.PENDING, JobStatus.EXECUTING])
            for job in jobs:
                lu_logger.info("catch job {}".format(job.id))
                execute_job(channel, job)
            time.sleep(5)
            lu_logger.info("next loop")
    finally:
        connection.close()


if __name__ == '__main__':
    # Run the scheduler loop when this file is invoked as a script.
    daemon()