# -*- coding: UTF-8 -*-
"""
补偿因为系统崩溃或其他原因造成任务已接收但未被正常调度的即时任务，时间范围：本日内
被补偿的条件是：状态至少为 RECEIVED,即已经通过可行性检查
fixme 这个补偿脚本要重新定义补偿，延时任务也需要补偿，但是补偿前需要确认是否过期
"""
import asyncio
import datetime
import json
from typing import List, Dict

from common_libs import datetime_utils
from common_libs.cache_utils import get_redis_connection
from common_libs.ctx_utils import app_context
from dispatcher import app
from dispatcher.common_utils import constants
from dispatcher.models.data_block import DataBlockModelManager
from dispatcher.routers.dispatch.dispatch_task_model_in import DispatchTaskState
from dispatcher.celery_work.handle.handle_tasks import handle_dispatch_task


class DumpTask(object):
    """Compensate instant tasks that were received but never dispatched
    normally (e.g. after a system crash).

    Only tasks whose state is at least RECEIVED (i.e. they already passed
    the feasibility check), within today's window and with at least one
    retry remaining, are re-dispatched.
    """

    # Default TTL (seconds) for the cached task payload. The compensation
    # window is one day (see ``run``), so the cache entry need not outlive it.
    DEFAULT_CACHE_TTL_SECONDS = 24 * 60 * 60

    @classmethod
    async def run(cls):
        """Query tasks eligible for compensation and re-dispatch them."""
        # Window: from "now" to one day ahead.
        time_range = [datetime_utils.get_time(), datetime_utils.time_window("now", days=1)]
        where = {
            "next_try_time__range": time_range,  # time window
            "dispatch_type": constants.CURRENT_DISPATCH_TYPE,
            "state__in": [DispatchTaskState.RECEIVED],  # TODO: only tasks that just passed the check are compensated
            "remain_try_times__ge": 1  # must still have retries left
        }
        tasks = await DataBlockModelManager.query_expires_soon_tasks(where=where, limit=constants.MAX_CACHE_DELAY_NUM)
        await cls.bulk_delay_task_handle(tasks)

    @staticmethod
    async def bulk_delay_task_handle(beans: List[Dict], ttl_seconds: int = DEFAULT_CACHE_TTL_SECONDS):
        """
        Parse a batch of persisted tasks, cache each payload in Redis and
        enqueue it for Celery dispatch.
        :param beans: task rows; each is expected to carry at least the
                      ``dispatch_type``, ``task_type`` and ``block_sn`` keys
                      used to build the cache key — TODO confirm against model
        :param ttl_seconds: expiry for the cached payload, in seconds
        :return: None
        """
        conn = await get_redis_connection()

        # TODO: parse everything first, then cache and enqueue in bulk.
        for bean in beans:
            key = f"{bean.get('dispatch_type')}:{bean.get('task_type')}:{bean.get('block_sn')}"
            bean_data = json.dumps(bean, ensure_ascii=False, separators=(',', ':'))
            # NOTE(review): the original called setex without the mandatory
            # ``time`` argument, which raises TypeError in redis-py; a one-day
            # TTL matches the compensation window — confirm the desired value.
            await conn.setex(name=key, time=ttl_seconds, value=bean_data)
            handle_dispatch_task.apply_async(args=(bean_data,))


async def work():
    """Run a single compensation pass inside the application context."""
    ctx = app_context(app)
    async with ctx:
        # TODO: DumpTask is driven directly at class level — is it safe to
        # run several of these scripts concurrently?
        await DumpTask.run()


if __name__ == '__main__':
    # FIXME: accept CLI arguments here; the script is managed by pm2, so the
    # execution schedule/rules must be defined there.
    # asyncio.run() creates, runs and properly closes a fresh event loop;
    # the get_event_loop()/ensure_future/run_until_complete combination is
    # the deprecated pre-3.7 pattern.
    asyncio.run(work())
