
import arrow
import traceback

from celery_once import QueueOnce
from bson import ObjectId
from threading import Thread
from flask import current_app as app
from mongoengine.errors import NotUniqueError

from app.celery.handle_result.events import Event
from app.celery.handle_result.utils import update_task_alert_settings, save_spider_urls
from app.errors import (
    JobNotFoundError,
    CeleryPushError,
    TaskNotFoundError, NoticeError
)
from app.celery.action.error import do_delete_task, do_rollback_task_progress
from app.celery.handle_result.changeCheck import update_baseline
from app import celery_app
from app.db import redis
from app.db.models.alertreceivers import AlertReceivers
from app.db.models.jobs import Job, ProxyIpResult
from app.db.models.balances import Balances
from app.db.models.users import User
from app.db.models.tasks import Task
from app.db.models.alert_groups import AlertGroup
from app.db.models.asset_tasks import AssetTasks
from app.db.models.api_token import OutPutConfig
from app.libs.alertsender import ContentAlertSender
from app.libs.scanner import BatchRemoveTask
from app.libs.enums import (
    TASK_UPDATE_KEY_MAP,
    TaskType,
    NotifyType as NotifyTypeName,
    TaskTriggerType,
    ExTaskType,
    EX_TASK_TYPES
)
from app.libs.utility import get_flat_dict_value, trans_interval
from app.libs.scanner import ScannerRequest
from app.libs.wechat import WeChat
from app.libs.yunpian import YunPian
from app.libs.sendcloud import SendCloud
from app.handler.v2_alert import create_system_notification
from app.config.settings import OUTPUT_QUEUE
from app.api.v2.job_util import update_last_web_security_status


@celery_app.task(acks_late=True)
def celery_send_to_alert_group(params, alert_group, notification):
    """Fan an auto-blacklist alert out to every enabled receiver of the
    given alert groups, over the channels enabled in ``notification``.

    :param params: template variables (user, name, dz, time, num, num1, dz1,
        domain_list, ...); ``num`` is the count of blacklisted risk URLs,
        ``num1`` the count that could not be blacklisted.
    :param alert_group: list of AlertGroup id strings.
    :param notification: channel switches, e.g.
        {'email': bool, 'sms': bool, 'wechat': bool, 'inBox': bool}.
    """
    alert_group = [ObjectId(_id) for _id in alert_group]
    sendcloud = SendCloud()
    yunpian = YunPian()
    wechat = WeChat()

    params['url'] = app.config.get('SCANVHOST')
    params['domain_list'] = _trans_domain_list(params.get('domain_list', []))
    email_failed_params = ('user', 'name', 'dz', 'num1', 'dz1')
    email_params = ('user', 'name', 'dz', 'time', 'num', 'domain_list', 'url')
    sms_params = ('name', 'dz', 'num', 'time')
    email_failed_template = 'scanvmax_defense_blacklist'
    email_template = 'scanvmax_risk_response'
    sms_template = '5238706'
    wx_tpl = 'nYSlrlA4dKS9AhTa8TCxdxSgDFdJr9vW5ph4BI9UT3g'
    wx_url = f"{params['url']}/#/yfld/blacklist/url"

    # Collect receiver ids across all groups. BUG FIX: skip groups that no
    # longer exist (the original dereferenced .first() unconditionally and
    # raised AttributeError on a deleted group).
    receiver_id_list = []
    for _group_id in alert_group:
        if _group := AlertGroup.objects.filter(pk=_group_id).first():
            receiver_id_list += _group.receiverIds

    # De-duplicate ids and keep only enabled receivers.
    receiver_list = []
    receiver_uid_list = []
    for _id in set(receiver_id_list):
        if _item := AlertReceivers.objects.filter(pk=_id, enable=True).first():
            receiver_list.append(_item)
            receiver_uid_list.append(_item.uid)

    rid_user_map = {receiver.id: User.objects.filter(pk=receiver.uid).first() for receiver in receiver_list}

    thread_list = []

    # inbox (in-app system notification)
    if notification.get('inBox') and params.get('num', 0) > 0:
        msg = f"资产{params.get('name', '')}({params.get('dz', '')})在" \
              f"{params.get('time')}发现{params.get('num', 0)}个风险URL，已自动加黑。"
        create_system_notification(receiver_uid_list, '系统加黑通知', msg)

    for receiver in receiver_list:
        # email: "risk response" mail when URLs were blacklisted, and a
        # separate "defense blacklist failed" mail when some could not be.
        if notification.get('email') and receiver.emailVerified:
            if params.get('num', 0) > 0:
                args = {
                    'to': [receiver.email],
                    'sub': {f"%{_k}%": [str(params.get(_k, ''))] for _k in email_params}
                }
                thread_list.append(Thread(target=sendcloud.sendmail, args=(email_template, args)))
            if params.get('num1', 0) > 0:
                args = {
                    'to': [receiver.email],
                    'sub': {f"%{_k}%": [str(params.get(_k, ''))] for _k in email_failed_params}
                }
                thread_list.append(Thread(target=sendcloud.sendmail, args=(email_failed_template, args)))

        # BUG FIX: default 'num' to 0 — a params dict without 'num' made
        # `None > 0` raise TypeError (other call sites already default to 0).
        if notification.get('sms') and params.get('num', 0) > 0:
            # sms
            if receiver.phoneVerified:
                args = '&'.join([f"#{_k}#={params.get(_k, '')}" for _k in sms_params])
                thread_list.append(Thread(target=yunpian.tpl_send, args=(receiver.phone, sms_template, args)))

            # wechat
            # NOTE(review): this block is nested under the 'sms' switch, so
            # WeChat alerts only fire when SMS is also enabled and num > 0 —
            # confirm whether that nesting is intentional.
            if (_user := rid_user_map.get(receiver.id)) and \
                    (_open_id := _user.wechat.get('wx_openid')) and \
                    notification.get('wechat'):
                args = {
                    "first": {
                        "value": "风险URL已应急处理",
                    },
                    "content": {
                        "value": f"{params.get('name')}（{params.get('dz', '')}）在 {params.get('time', '')} "
                                 f"监测发现的{params.get('num', '')}个风险URL已加入云防御URL黑名单中。",
                    },
                    "occurtime": {
                        "value": f"{params.get('time')}",
                    },
                    "remark": {
                        "value": "登录ScanV控制台查看详情。",
                    }
                }
                thread_list.append(Thread(target=wechat.send_tpl_msg, args=(wx_tpl, _open_id, wx_url, args)))

    # Dispatch all channel sends concurrently, then wait for them to finish.
    for thread in thread_list:
        thread.start()

    for thread in thread_list:
        thread.join()

    app.logger.info(f"Succeed send to alert group '{alert_group}'.")


def _trans_domain_list(domain_list):
    t_body = """<div style="width: 500px;margin: 0 auto; border: 1px solid #e7eaef;boder-radius: 2px;padding:10px 0">
    <div style="max-height: 100px; overflow-y:auto">
        <table>
            <tbody>
                {tr}
            </tbody>
        </table>
    </div>
</div>"""
    tr = """<tr>
    <td style="padding:0 10px">{number}丶{url}</td>
</tr>"""
    s0 = ""
    for number, domain in enumerate(domain_list):
        s0 = f"{s0}{tr.format(number=number + 1, url=domain)}"

    return t_body.format(tr=s0).replace('\n', '')


def update_asset_task_status(request_data, status=None):
    """Propagate an engine status change onto the owning AssetTasks document.

    Lookup order: first by the ``refId`` embedded in the notification's
    addition, then by the ``<taskType>Settings.taskId`` field.
    """
    addition = request_data.get('task', {}).get('addition', {})
    task_type = addition.get("taskType")
    asset_task = AssetTasks.objects.filter(id=addition.get("refId")).first()
    if asset_task is None:
        engine_task_id = request_data.get("task_id") or request_data.get("taskId")
        asset_task = AssetTasks.objects.find_one({f"{task_type}Settings.taskId": engine_task_id})
    if asset_task:
        asset_task.update_status(task_type=task_type, status=status)


def update_job_proxy_ip(request_data, runtime=None):
    """Record (or clear) the proxy IP used for one task type on the Job.

    A truthy proxyIp upserts an entry for the task type; a falsy proxyIp
    removes any existing entry for that type.
    """
    job = Job.objects.filter(id=request_data.get("jobId")).first()
    if job is None:
        return
    task_type = request_data.get("taskType")
    proxy_ip = request_data.get("proxyIp")
    if runtime is None:
        runtime = arrow.utcnow().datetime
    # Index the embedded results by task type for easy upsert/removal.
    results = {entry.task_type: entry for entry in job.proxy_ip_result}
    if proxy_ip:
        results[task_type] = ProxyIpResult.from_dict(
            {"ip": proxy_ip, "runtime": runtime, "task_type": task_type})
    elif task_type in results:
        results.pop(task_type)
    job.proxy_ip_result = list(results.values())
    job.save()


def notify_queued(data):
    """Handle NT_TASK_QUEUED: create or refresh the local task as 'waiting'.

    :param data: scanner notification payload (already enriched with 'task').
    :raises JobNotFoundError: the task references a job that no longer exists
        (caller is expected to delete the orphaned task).
    """
    skip_status = ('completed', 'stopped', 'active')

    task_data = get_task_data(data)
    # NOTE: get_query_dict pops 'pk' out of task_data as a side effect.
    query_dict = get_query_dict(task_data)

    # Engine-internal task types are not tracked as local Task documents.
    if task_data.get("taskType") in EX_TASK_TYPES:
        return

    task_data['startTime'] = data.get('notified_at')
    task_data['endTime'] = data.get('notified_at')
    task_data['status'] = 'waiting'

    if task := Task.objects.filter(**query_dict).first():
        if not Job.objects.filter(pk=task.jobId).first():
            raise JobNotFoundError(task.jobId, str(task.pk))

        # Already waiting: only refresh the queue timestamp.
        if task.status in ('waiting',):
            task.update(startTime=data.get('notified_at'))
            return
        # Ignore stale queue notices once the task has already progressed.
        if task.status in skip_status:
            return
        if task.progress >= 99:
            return

        # Remove startTime if manual task.
        if task.triggerType == TaskTriggerType.manual.value:
            task_data.pop('startTime')
        task.update(**task_data)
    else:
        try:
            Task(**task_data).save()
        except NotUniqueError:
            # A concurrent worker already created this task; nothing to do.
            pass
    update_asset_task_status(data, "waiting")
    update_job_proxy_ip(task_data, runtime=task_data['endTime'])


def notify_processing(data):
    """Handle NT_TASK_PROCESSING: create or refresh the local task as 'active'.

    Progress is clamped to 98 so that only the completion path can push a
    task to 99/100.
    """
    allow_status = ('waiting', 'active', 'completed', 'stopped')
    skip_status = ('completed', 'stopped')

    task_data = get_task_data(data)
    # NOTE: get_query_dict pops 'pk' out of task_data as a side effect.
    query_dict = get_query_dict(task_data)
    task_data['endTime'] = data.get('notified_at')
    task_data['status'] = 'active'

    # Engine-internal task types are not tracked as local Task documents.
    if task_data.get("taskType") in EX_TASK_TYPES:
        return

    # Reserve progress >= 99 for notify_completed.
    if task_data.get('progress', 0) >= 99:
        task_data['progress'] = 98

    if task := Task.objects.filter(**query_dict, status__in=allow_status).first():
        if task.status in skip_status:
            return
        # Drop out-of-order notifications: progress must not move backwards.
        if task_data['progress'] < task.progress:
            return
        task.update(**task_data)
    elif task_data.get("taskSessionId") and task_data.get("taskId"):
        # No local record yet: create one on the fly.
        task_data['startTime'] = data.get('notified_at', arrow.utcnow().datetime)
        try:
            Task(**task_data).save()
        except NotUniqueError:
            # A concurrent worker already created this task.
            pass
    update_asset_task_status(data, "active")
    update_job_proxy_ip(task_data, runtime=task_data['endTime'])


def notify_completed(data):
    """Handle NT_TASK_COMPLETED: move the local task to progress 99 and hand
    off result processing to do_content_task / do_other_task.

    :raises TaskNotFoundError: no matching local task was found or created.
    :raises JobNotFoundError: the task's job no longer exists.
    :raises NoticeError: the Mongo update did not modify any document.
    """
    allow_status = ('waiting', 'active', 'stopped', 'completed')
    skip_status = ('completed', 'stopped')

    if data.get("task", {}).get("addition", {}).get("taskType") == ExTaskType.change_check_config.value:
        change_check_task_id = data.get("task", {}).get("job_config_map", {}).get(
            "nscan", {}).get("plugin_config", {}).get("change_check", {}).get("task_id", "")
        # Refresh the tamper-detection (change check) baseline sample.
        update_baseline(TaskTriggerType.manual.value, change_check_task_id, "")
        # Once the sample refresh is done, remove the engine-side task.
        BatchRemoveTask(ids=[data.get("task", {}).get("task_id"), ]).batch_remove()
        return

    task_data = get_task_data(data)
    # NOTE: get_query_dict pops 'pk' out of task_data as a side effect.
    query_dict = get_query_dict(task_data)

    task = Task.objects.filter(**query_dict, status__in=allow_status).first()
    if (not task) and task_data.get("taskSessionId") and task_data.get("taskId"):
        # No local record: create one; on a duplicate-key race, re-query.
        task_data['startTime'] = data.get('notified_at', arrow.utcnow().datetime)
        try:
            task = Task(**task_data).save()
        except NotUniqueError:
            task = Task.objects.filter(**query_dict, status__nin=skip_status).first()
    if not task:
        raise TaskNotFoundError(f"{query_dict}, previous task not found.")

    if not Job.objects.filter(pk=task.jobId).first():
        raise JobNotFoundError(task.jobId, str(task.pk))

    if task.status in skip_status:
        return
    if task.status == 'waiting':
        task_data['status'] = 'active'
    # Already at/past 99: completion was handled before (idempotence guard).
    if task.progress >= 99:
        return

    task_data['endTime'] = data.get('notified_at')
    task_data["progress"] = 99
    task_data.pop("notificationTarget", 0)

    app.logger.info(f"Accept completed, task_id: {str(task.pk)}, task_type: {task.taskType}")
    app.logger.info(f"Is task type content ? {task.taskType == 'content'}")

    if task.taskType == 'content':
        app.logger.info(f"Do content task.")
        event = Event(data['task_session_id'], '', task.taskType, job_id=task.jobId)
        resp = task.update(**task_data, result=event.get_event_info())
        func = do_content_task
    else:
        # Handle result data for task types other than 'content'.
        # foreign
        plugins = task_data.get('taskSettings', {}).get('collect', {}).get('plugins', [])
        event = Event(data['task_session_id'], '', task.taskType, job_id=task.jobId, plugins=plugins)
        resp = task.update(**task_data, result__addition=event.get_event_info())
        # Format completed data for other types and send alerts downstream.
        func = do_other_task

    if resp > 0:
        task_pk = str(task.pk)
        func(task_pk)
    else:
        raise NoticeError(msg="Mongo update failed")

    update_job_proxy_ip(task_data, runtime=task_data["endTime"])


def redis_task_id_lock_wrapper(func):
    """Decorator: serialize work per task_id behind a 60s Redis NX lock.

    If another worker currently holds the lock for this task_id, the call is
    silently dropped (not queued or retried).
    """
    def wrapper(task_id: str, **kwargs):
        task_lock = f"task_lock:{task_id}"
        # NX + 60s TTL: only one worker processes a given task at a time;
        # the TTL guards against locks leaked by a crashed worker.
        if not redis.set(task_lock, task_id, ex=60, nx=True):
            return
        try:
            return func(task_id, **kwargs)
        finally:
            # Always release the lock, even if func raised. (The original's
            # `except Exception as e: raise e` was a no-op and was removed.)
            redis.delete(task_lock)

    return wrapper


@celery_app.task(base=QueueOnce, once={'graceful': True}, acks_late=True, name="tasks:do_content_task")
@redis_task_id_lock_wrapper
def do_content_task(task_id: str, retry=False, **kwargs):
    """Finalize a completed 'content' task: close out the task document,
    charge the page quota, roll the job/asset result pointers forward, and
    fire configured alerts and output-queue pushes.

    :param task_id: primary key of the Task document.
    :param retry: True when re-running an already finished task — only the
        task document itself is updated; balances and pointers are left alone.
    :param kwargs: supports send_alert (default True) to suppress alerting.
    """
    task = Task.objects.with_id(task_id)
    # BUG FIX: bail out before touching task attributes — the original
    # dereferenced task.jobId before this check and raised AttributeError
    # when the task was missing.
    if not task:
        return
    job = Job.objects.with_id(task.jobId)
    asset_task = AssetTasks.objects.filter(contentSettings__taskId=task.taskId).first()
    used = 0 if task.triggerType == 'schedule' else 1

    # Close out the task: securityStatus, status='completed', progress=100;
    # schedule-triggered tasks also refresh alertSettings from the job.
    query = {
        'securityStatus': 'warning' if task.result.warningCount > 0 else 'safe',
        'status': 'completed',
        'progress': 100,
        # 'endTime': arrow.utcnow().datetime
    }
    if not retry:
        query['isLatestTask'] = True
    if not used:
        query['alertSettings'] = job.alertSettings
    task.modify(**query)

    if retry:
        return

    # Charge the crawled-URL count against the plan's maxPage quota.
    # (contentManualTask.used is deducted at task creation, consistent with
    # other task types, so it is not incremented here.)
    Balances.objects(uid=task.uid).update_one(
        inc__balance__maxPage__used=task.result.addition.get('statistics', {}).get('urls', 0)
    )
    # Demote every other task of this type on the job from isLatestTask.
    Task.objects.filter(isLatestTask=True, jobId=task.jobId, taskType=task.taskType, pk__ne=task.pk).update(
        isLatestTask=False)

    # Roll the job's content result pointers forward.
    job.update(contentResultId=task.pk, contentPrevResultId=job.contentResultId)

    # Re-fetch the job and refresh its web security status.
    job = Job.objects.filter(pk=task.jobId).first()
    update_last_web_security_status(job)

    # Mirror the result pointers onto the owning asset task, if any.
    if asset_task:
        asset_task.update(contentResultId=task.pk, contentPrevResultId=asset_task.contentResultId)
        # Reload both documents before recomputing the cached last result.
        asset_task.reload()
        task.reload()
        asset_task._get_web_security_last_result(is_save=True)
        asset_task.update_status(task_type="content", status="completed")
    try:
        save_spider_urls(task.id)
    except Exception as e:
        app.logger.info(
            f"[01]更新任务URL记录失败：job: {job.note}{job.id}, task: {task.target}-{task.taskType}-{task.id}, {e}")
    # Send email/SMS/WeChat/in-box alerts (subject to daily send limits).
    if kwargs.get('send_alert', True):
        job, task = update_task_alert_settings(job, task)
        if task.alertSettings.alertGroupIds:
            alert_group = AlertGroup.objects.filter(pk=task.alertSettings.alertGroupIds[0]).first()
            if alert_group and alert_group.enable:
                alert_sender = ContentAlertSender(task)
                alert_sender.send_alerts()
    # Push the task id onto the external output queue if so configured.
    out_config = OutPutConfig.objects.find({"uid": task.uid, "is_active": True}).first()
    if out_config and task.taskType in out_config.push_task_types:
        redis.rpush(OUTPUT_QUEUE, str(task.id))


@celery_app.task(base=QueueOnce, once={'graceful': True}, acks_late=True, name="tasks:do_other_task")
@redis_task_id_lock_wrapper
def do_other_task(task_id: str, retry=False, **kwargs):
    """Hand a non-content task's result over to TaskResultHandler."""
    task = Task.objects.with_id(task_id)
    # Imported locally to avoid a circular import with other_tasks.
    from .other_tasks import TaskResultHandler
    TaskResultHandler(retry=retry).handle(task)


def notify_aborted(data):
    """Handle NT_TASK_ABORTED: mark the matching local task as 'stopped'."""
    task_data = get_task_data(data)

    # Engine-internal task types are not tracked as local Task documents.
    if task_data.get("taskType") in EX_TASK_TYPES:
        return

    # NOTE: get_query_dict pops 'pk' out of task_data as a side effect.
    query_dict = get_query_dict(task_data)
    task = Task.objects.filter(**query_dict).first()
    if task is None:
        return
    if task.status in ('completed', 'failed'):
        return
    # Keep the task's own progress, capped at 99 (100 is reserved).
    task_data["progress"] = min(task.progress, 99)
    task_data['endTime'] = data.get('notified_at')
    task.update(**task_data, status="stopped")
    update_asset_task_status(data, "stopped")
    update_job_proxy_ip(task_data, runtime=task_data['endTime'])


def notify_failed(data):
    """Handle NT_TASK_FAILED: mark the matching local task as 'failed'.

    :raises TaskNotFoundError: no matching active/stopped task exists.
    """
    task_data = get_task_data(data)

    # Engine-internal task types are not tracked as local Task documents.
    if task_data.get("taskType") in EX_TASK_TYPES:
        return

    query_dict = get_query_dict(task_data)
    # BUG FIX: the original used status=['active', 'stopped'], which compares
    # the status field to a list and never matches; status__in is intended
    # (cf. the same query in notify_processing / notify_completed).
    if not (task := Task.objects.filter(**query_dict, status__in=['active', 'stopped']).first()):
        raise TaskNotFoundError(f"{query_dict}, previous task not found.")

    # A stopped task stays stopped; do not downgrade it to failed.
    if task.status == 'stopped':
        return

    task_data['endTime'] = data.get('notified_at')
    task.update(**task_data, status="failed")
    update_asset_task_status(data, "failed")
    update_job_proxy_ip(task_data, runtime=task_data['endTime'])


def repeat_tasks(data):
    """Handle NT_ACTION_REPEAT: re-run result processing for a list of tasks.

    :raises JobNotFoundError: a task's job no longer exists.
    :raises CeleryPushError: async dispatch failed (caller rolls progress back).
    :raises NoticeError: the Mongo update did not modify any document.
    """
    task_id_list = data.get('task', {}).get('addition', {}).get('task_id_list')
    for task in Task.objects.filter(id__in=task_id_list).only('id', 'taskSessionId', 'taskType', 'taskSettings',
                                                              'jobId'):
        task_session_id = task.taskSessionId

        if not Job.objects.filter(pk=task.jobId).first():
            raise JobNotFoundError(task.jobId, str(task.pk))

        # TODO: Support asset type.
        if task.taskType in [TaskType.asset.value]:
            continue
        # TODO
        if task.taskType == TaskType.content.value:
            func = do_content_task
            event = Event(task_session_id, '', task.taskType, job_id=str(task.jobId))
        else:
            func = do_other_task
            plugins = task.taskSettings.collect.get('plugins')
            event = Event(task_session_id, '', task.taskType, plugins=plugins)
        # Force the task back to active/99 with the rebuilt event payload.
        resp = task.update(status='active', progress=99, result__addition=event.get_event_info())
        if resp > 0:
            task_pk = str(task.pk)
            if not app.config.get('DEBUG', False):
                resp = func.delay(task_pk, retry=True)
                if not resp:
                    # rollback progress
                    raise CeleryPushError(task_pk)
                app.logger.info(resp)
            else:
                # In DEBUG mode run synchronously for easier tracing.
                func(str(task.pk), retry=True)
        else:
            raise NoticeError("Mongo update failed")


# Scanner notification type -> local handler. sync_update() and
# load_scan_events_for_scanv() dispatch incoming notifications through this table.
RECEIVE_NOTIFY = {
    'NT_TASK_QUEUED': notify_queued,
    'NT_TASK_PROCESSING': notify_processing,
    'NT_TASK_COMPLETED': notify_completed,
    'NT_TASK_ABORTED': notify_aborted,
    'NT_TASK_FAILED': notify_failed,
    'NT_ACTION_REPEAT': repeat_tasks
}


# ********* new end ********* #


def get_task_data(data: dict) -> dict:
    """Flatten a scanner notification payload into Task model fields using
    TASK_UPDATE_KEY_MAP.

    Empty-string values are normalized to None; a dict-valued
    taskSettings.interval is converted via trans_interval; a missing/falsy
    proxyIp is normalized to ''.

    :param data: raw (possibly enriched) notification payload.
    :return: dict of Task field names to values.
    """
    data_dict = {}
    for data_k, task_k in TASK_UPDATE_KEY_MAP.items():
        if (_temp_data := get_flat_dict_value(data, data_k)) is None:
            continue
        if task_k == "*":
            # Wildcard target: merge the nested mapping's keys directly.
            for k, v in _temp_data.items():
                data_dict[k] = v if v != '' else None
        else:
            data_dict[task_k] = _temp_data if _temp_data != '' else None

    # BUG FIX: use .get — the original indexed data_dict['taskSettings']
    # directly and raised KeyError when the payload carried no taskSettings.
    if (interval := data_dict.get('taskSettings', {}).get('interval')) and isinstance(interval, dict):
        data_dict['taskSettings']['interval'] = trans_interval(interval)
    # Normalize a missing/falsy proxyIp to the empty string.
    data_dict["proxyIp"] = data_dict.get("proxyIp") or ""
    return data_dict


def get_query_dict(task_data):
    """Build the Task lookup filter: prefer an explicit pk, else session id.

    Side effect: pops 'pk' out of *task_data* so it is not later passed
    to Task.update().
    """
    pk = task_data.pop('pk', None)
    if pk:
        return {'id': pk}
    return {'taskSessionId': task_data.get('taskSessionId')}


@celery_app.task(acks_late=True)
def sync_result():
    """Poll the scanner for unacked terminal notifications (completed /
    aborted / failed) and re-dispatch those belonging to sessions we still
    consider active locally.

    BUG FIX: the aborted branch previously passed the Flask app object as a
    spurious extra first argument to sync_update.delay; sync_update takes
    (notice_data, notice_type) only.
    """
    scanner = ScannerRequest()
    url = "/v1/notifications:list?notify_type_name={notice_type}&is_all_ack=false&limit=9999999"
    comp = scanner.get(url.format(notice_type='NT_TASK_COMPLETED')).get('notifications', [])
    stop = scanner.get(url.format(notice_type='NT_TASK_ABORTED')).get('notifications', [])
    fail = scanner.get(url.format(notice_type='NT_TASK_FAILED')).get('notifications', [])
    # Set for O(1) membership tests over the per-notification loop below.
    active_sessions = {
        _item.taskSessionId for _item in Task.objects.filter(status='active', progress__lt=100)}

    for notice_type, notices in (('NT_TASK_COMPLETED', comp),
                                 ('NT_TASK_ABORTED', stop),
                                 ('NT_TASK_FAILED', fail)):
        for _data in notices:
            if _data.get('task_session_id') in active_sessions:
                sync_update.delay(_data, notice_type)


def sync_result_waiting():
    """Re-dispatch every unacked NT_TASK_QUEUED notification from the scanner."""
    url = "/v1/notifications:list?notify_type_name={notice_type}&is_all_ack=false&limit=9999999"
    notices = ScannerRequest().get(url.format(notice_type='NT_TASK_QUEUED')).get('notifications', [])
    for notice in notices:
        sync_update.delay(notice, 'NT_TASK_QUEUED')


def sync_result_active():
    """Re-dispatch every unacked NT_TASK_PROCESSING notification from the scanner."""
    url = "/v1/notifications:list?notify_type_name={notice_type}&is_all_ack=false&limit=9999999"
    notices = ScannerRequest().get(url.format(notice_type='NT_TASK_PROCESSING')).get('notifications', [])
    for notice in notices:
        sync_update.delay(notice, 'NT_TASK_PROCESSING')


@celery_app.task(acks_late=True)
def sync_update(notice_data, notice_type):
    """Re-hydrate one scanner notification with live task/session data and
    feed it through the matching RECEIVE_NOTIFY handler.

    Best-effort: every exception is logged and swallowed so one bad
    notification cannot break the sync sweep.
    """
    try:
        scanner = ScannerRequest()
        task_id = notice_data.get('task_id')
        task_session_id = notice_data.get('task_session_id')
        scan_task = scanner.get(f"/v1/tasks:get?task_id={task_id}")
        scan_task_session = scanner.get(
            f"/v1/tasks/sessions:list?task_session_id={task_session_id}").get('task_sessions', [])
        if scan_task and scan_task_session:
            scan_task_session = scan_task_session[0]
            notice_data['task'] = {
                'addition': scan_task_session.get('addition', {}),
                'progress': scan_task_session.get('progress', 0),
                'job_names': scan_task_session.get('job_names'),
                'next_start_at': scan_task.get('next_start_at')
            }

            # If the next cycle is already running, the previous cycle's
            # "next run" time is taken as that next cycle's start time.
            if scan_task.get('next_start_at') and (
                    scan_task['status_name'] not in ['TS_ABORTING', 'TS_ABORTED', 'TS_COMPLETED', 'TS_FAILED']):
                notice_data['task']['next_start_at'] = scan_task['last_started_at']

            if func := RECEIVE_NOTIFY.get(notice_type):
                func(notice_data)
                # (A debug print() duplicating this log line was removed.)
                app.logger.info(f"Succeed Sync task_session_id '{notice_data.get('task_session_id')}'.")
    except Exception as e:
        traceback.print_exc()
        app.logger.exception(e)


@celery_app.task(acks_late=True)
def repeat_celery_tasks(isodate_low, isodate_high, task_type=None, send_alert=False, is_latest=False):
    """Re-dispatch result handling for tasks stuck at progress 99, or — with
    is_latest + task_type — for the latest completed task of each job.

    :param isodate_low: ISO date lower bound on startTime (optional).
    :param isodate_high: ISO date upper bound on startTime (optional).
    :param task_type: restrict to one task type (optional).
    :param send_alert: forwarded to the result handlers for the stuck-task path.
    :param is_latest: re-run only the newest completed task per job.
    """
    task_query = {
        'status': NotifyTypeName.NT_TASK_PROCESSING.value,
        'progress': 99,
    }
    if is_latest and task_type:
        # Newest completed task per job of the given type since isodate_low.
        pipeline = [
            {'$match': {'taskType': task_type, 'status': 'completed',
                        'startTime': {'$gt': arrow.get(isodate_low).datetime}}},
            {'$sort': {'startTime': -1}},
            {"$group": {'_id': '$jobId', 'task_id': {'$first': '$_id'}}}
        ]

        for item in Task.objects.aggregate(pipeline):
            if task_id := str(item.get('task_id', '')):
                if task_type == TaskType.content.value:
                    do_content_task.delay(task_id, send_alert=False)
                else:
                    do_other_task.delay(task_id, send_alert=False)
        # BUG FIX: the original fell through to the unfiltered stuck-task
        # query below and re-dispatched every progress-99 task as well.
        return

    if isodate_low:
        task_query['startTime'] = {'$gte': arrow.get(isodate_low).datetime}
    if isodate_high:
        task_query.setdefault('startTime', {}).update({'$lte': arrow.get(isodate_high).datetime})
    if task_type:
        # BUG FIX: the stored field is camelCase 'taskType' (as used throughout
        # this module); the raw-query key 'task_type' matched no documents.
        task_query['taskType'] = task_type

    tasks = Task.objects.only('id', 'taskType').find(task_query)

    app.logger.info(f"Start repeat tasks. Total: {tasks.count()}")
    for task in tasks:
        task_id = str(task.id)
        if task.taskType == TaskType.content.value:
            do_content_task.delay(task_id, send_alert=send_alert)
        else:
            do_other_task.delay(task_id, send_alert=send_alert)


# Local task status -> the sync poller that re-drives notifications for
# tasks stuck in that status.
SYNC_FUNC = {
    'waiting': sync_result_waiting,
    'active': sync_result_active,
    'completed': sync_result
}


@celery_app.task(name="load_scan_events_for_scanv", acks_late=True)
def load_scan_events_for_scanv(request_args):
    """Entry point for pushed scanner notifications: dispatch by notify type.

    Known error classes get targeted handling (orphan cleanup, progress
    rollback); anything else is logged and swallowed. The incoming payload
    is always logged on the way out.
    """
    handler = RECEIVE_NOTIFY.get(request_args.get("notify_type_name"))
    if handler is None:
        return
    try:
        handler(request_args)
        app.logger.info("Data processing completed.")
    except TaskNotFoundError as e:
        app.logger.info(e)
    except JobNotFoundError as e:
        # The local job vanished: delete the orphaned task.
        app.logger.info(e)
        do_delete_task(task_id=e.task_id)
    except CeleryPushError as e:
        app.logger.exception(e)
        traceback.print_exc()
        do_rollback_task_progress(e.task_id)
    except Exception as e:
        app.logger.exception(e)
        traceback.print_exc()
    finally:
        app.logger.info(request_args)
