# -*- coding: utf-8 -*-
# @Time        : 2023/9/12 18:17
# @Author      : bai.<byscut2010@foxmail.com>
# @File        : task_signal_handler.py
# @Description :
import traceback
from celery import Celery, Task
from celery.signals import task_prerun, task_success, task_failure, task_revoked, task_retry
# from domain.ai_edit import tasks
import requests
from config.env.config_env import run_config
from celery.signals import worker_process_init

# Task-type discriminator values carried in a task's result payload
# (read via result.get("task_type") in the commented-out handlers below).
TASK_TYPE_LIST_PUBLIC = "list_public"  # public media listing task
TASK_TYPE_AI_EDITOR = "ai_editor"      # AI video-editing task

#
# @worker_process_init.connect
# def prep_db_pool(**kwargs):
#     """
#         When Celery fork's the parent process, the db engine & connection pool is included in that.
#         But, the db connections should not be shared across processes, so we tell the engine
#         to dispose of all existing connections, which will cause new ones to be opened in the child
#         processes as needed.
#         More info: https://docs.sqlalchemy.org/en/latest/core/pooling.html#using-connection-pools-with-multiprocessing
#     """
#     # The "with" here is for a flask app using Flask-SQLAlchemy.  If you don't
#     # have a flask app, just remove the "with" here and call .dispose()
#     # on your SQLAlchemy db engine.
#     from run import app
#     from config import server_config
#     with app._app.app_context():
#         server_config.db_engine.dispose()
#
#
# @task_success.connect
# def task_success_handler(sender, result, **kwargs):
#     from domain.draft import draft_interactor
#     from database.db_manager import init_session, get_session
#     print(f"[DEBUG] entering callback")
#     init_session()
#     print(f"[INFO] Task success {sender}, {result} {type(result)}")
#     draft_id = None
#     try:
#         if result.get("task_type") == TASK_TYPE_AI_EDITOR:
#             if result.get("data"):
#                 from domain.draft.video import draft_video_interactor
#
#                 draft_id = result.get("data").get("draft_id")
#                 data = result.get("data").get("data")
#                 draft_video_interactor.recv_video_ai_match_result(draft_id, data)
#
#         elif result.get("task_type") == TASK_TYPE_LIST_PUBLIC:
#             if result.get("data"):
#                 draft_id = result.get("data").get("draft_id")
#                 project_id = result.get("data").get("project_id")
#                 data = result.get("data").get("data")
#                 draft_interactor.update_ai_media(project_id, draft_id, data)
#         session = get_session()
#         session.commit()
#         session.close()
#     except Exception as e:
#         print(traceback.format_exc(50))
#         session = get_session()
#         session.rollback()
#         session.close()
#
#     # Callback: kick off the preview task
#     if result.get("task_type") == TASK_TYPE_AI_EDITOR and draft_id:
#         from domain.draft.video import draft_video_interactor
#         draft_video_interactor.notify_video_preview_start(draft_id, True)
#
#
# @task_failure.connect
# def report_failure(sender, task_id, args, kwargs, einfo, **_kwargs):
#     # TODO: send an alert on task failure, and update the processing status
#     print(f"[INFO] Task faliure: {task_id} {einfo}")
#
#
# @task_revoked.connect
# def task_canceled(sender, request, terminated, signum, expired, **kwargs):
#     # TODO: hook invoked after a task is revoked (e.g. due to queue backlog / wait timeout)
#     print(f"[INFO] task canceled:{request.get_json()}")
