# 通过celery来设计定时任务，每60秒执行一次任务状态的查询工作;
# 获取的策略是：
# 1、超过TIMEOUT小时仍没有结果的数据，置为错误；
# 2、每次处理最多LIMITNUM个数据；
import apscheduler.schedulers.background as scheduler
import dbExcute
import celeryAsrTask
import celeryClipTask

# TIMEOUT is expressed in hours (compared against btime via TIMESTAMPDIFF(HOUR, ...)).
# NOTE(review): the file header mentions 5 hours / batches of 100, and a comment
# below mentions TIMEOUT=8, but the actual values here are 1 hour and 1000 rows
# per sweep — confirm which is intended.
TIMEOUT = 1
LIMITNUM = 1000

def CeleryTaskScheduler():
    """One scheduler tick: expire overdue ASR tasks, then sync the Celery
    state of ASR (step 1) and clip (step 2) subtasks into the database."""
    setAsrTaskTimeout()
    # First reconcile the ASR tasks, then the clip tasks.
    updateAsrTaskStatus()
    updateClipTaskStatus()
    # Bug fix: the original `print(f"运行一次定时{list}")` interpolated the
    # builtin `list` class and logged "<class 'list'>"; log a plain message.
    print("运行一次定时任务")

# Mark ASR subtasks that have been running longer than TIMEOUT hours as failed.
# The timeout is judged against btime (begin time), not ctime.
def setAsrTaskTimeout():
    """Two-phase timeout sweep: first flag overrun rows with the transient
    status -2, then log each flagged row and settle it to the error status -1."""
    upsql = f"update subtask set status = -2, etime=now(), reason='处理超时' where status = 1 and process != 100 and TIMESTAMPDIFF(HOUR, btime, NOW()) > {TIMEOUT}"
    dbExcute.updateData(upsql)
    # NOTE(review): this selects every status=-2 row, including any left over
    # from earlier sweeps — confirm -2 is only ever used as this transient marker.
    sql1 = "select uuid1 from subtask where status = -2"
    rows = dbExcute.getTaskIdList(sql1)  # renamed from `list`: don't shadow the builtin
    for row in rows:
        uuid = row[0]
        if uuid == '':
            continue
        # Log the timeout and settle the row into the final error state.
        dbExcute.addTaskLog(uuid, "ASR任务超时")
        sql2 = f"update subtask set status = -1 where uuid1 = '{uuid}'"
        dbExcute.updateData(sql2)

# subtask.status values:
#    1  ASR in progress
#    2  done
#   -1  error
#    0  cancelled
# subtask.process values: 0 = waiting, 1 = running, 100 = finished
# Celery task states queried below:
#   PENDING / STARTED / SUCCESS / FAILURE / RETRY / REVOKED / PROGRESS
def updateAsrTaskStatus():
    """Poll Celery for ASR (step 1) subtasks still running and write their
    terminal state (SUCCESS / FAILURE / REVOKED) back into the database."""
    # NOTE(review): the original comment said "status == 1", but the query
    # actually filters on process = 1.
    sql = f"select uuid1 from subtask where step=1 and process= 1 order by id asc limit {LIMITNUM}"
    rows = dbExcute.getTaskIdList(sql)  # renamed from `list`: don't shadow the builtin
    for row in rows:
        uuid = row[0]
        if uuid == '':
            continue
        print(f"Celery查询ASR任务状态 ID={uuid}")
        celery_result = celeryAsrTask.celeryAsrTask.AsyncResult(uuid)
        state = celery_result.state
        print(f"返回结果:[{state}]")
        if state == 'SUCCESS':
            dbExcute.addTaskLog(uuid, "ASR任务执行完成[celery]")
            set_asr_task_success(uuid)
        elif state == 'FAILURE' or state == 'REVOKED':
            # On failure/revocation .result is the raised exception; guard
            # against an exception with empty args so one bad row cannot
            # abort the whole batch with an IndexError.
            result = celery_result.result
            info = result.args[0] if getattr(result, "args", None) else str(result)
            set_asr_task_fault(uuid, info)
            dbExcute.addTaskLog(uuid, "ASR任务失败[celery]")

def updateClipTaskStatus():
    """Poll Celery for clip (step 2) subtasks still running and write their
    terminal state back into the database.

    The query returns (uuid1, uuid2): uuid1 keys the subtask row, uuid2 is
    the Celery task id of the clip job.
    """
    sql = f"select uuid1, uuid2 from subtask where step=2 and process= 1 order by id asc limit {LIMITNUM}"
    rows = dbExcute.getTaskIdList(sql)  # renamed from `list`: don't shadow the builtin
    for row in rows:
        # Renamed for clarity: the original bound the uuid1 column to `uuid`
        # and the uuid2 column to `uuid1`, which was highly misleading.
        task_uuid = row[0]  # subtask key (uuid1 column)
        clip_uuid = row[1]  # Celery task id (uuid2 column)
        if clip_uuid == '':
            continue
        print(f"Celery查询Clip任务状态 ID={task_uuid}")
        celery_result = celeryClipTask.celeryClipTask.AsyncResult(clip_uuid)
        state = celery_result.state
        print(f"返回结果:[{state}]")
        if state == 'SUCCESS':
            # Bug fix: the log messages were plain strings missing the `f`
            # prefix, so the literal text "{uuid1}" was logged.
            dbExcute.addTaskLog(task_uuid, f"剪切任务执行完成[{clip_uuid}]")
            set_clip_task_success(task_uuid)
        elif state == 'FAILURE' or state == 'REVOKED':
            # Guard against a failure result with empty args (see updateAsrTaskStatus).
            result = celery_result.result
            info = result.args[0] if getattr(result, "args", None) else str(result)
            set_clip_task_fault(task_uuid, info)
            dbExcute.addTaskLog(task_uuid, f"剪切任务执行错误[{clip_uuid}]")

# ASR step finished: mark process=100, keep status=1 and clear any old reason.
# NOTE(review): the original comment said "set status to 2", but the code has
# always written status = 1 — presumably because the clip step still follows;
# confirm against the status legend.
def set_asr_task_success(uuid):
    """Record successful completion of the ASR step for subtask *uuid*."""
    statement = (
        "update subtask set etime=now(), status = 1, process=100, reason='' "
        f"where uuid1='{uuid}'"
    )
    dbExcute.updateData(statement)
 
def set_asr_task_fault(uuid, info="处理失败"):
    """Mark the ASR step of subtask *uuid* as failed.

    Sets status=-1, process=100, etime=now() and stores *info* as the reason.
    """
    # Escape single quotes so a failure message containing ' cannot break
    # (or inject into) the statement — dbExcute takes raw SQL strings.
    # NOTE(review): prefer parameterized queries if dbExcute supports them.
    safe_info = str(info).replace("'", "''")
    upsql = f"update subtask set status = -1, etime=now(), process=100, reason='{safe_info}' where uuid1='{uuid}'"
    # Dropped the original pointless `if not updateData(...): return` guard.
    dbExcute.updateData(upsql)

# Clip step finished: mark process=100, keep status=1 and clear any old reason.
def set_clip_task_success(uuid):
    """Record successful completion of the clip step for subtask *uuid*."""
    parts = [
        "update subtask set etime=now(), status = 1, process=100, reason=''",
        f"where uuid1='{uuid}'",
    ]
    dbExcute.updateData(" ".join(parts))

def set_clip_task_fault(uuid, info="处理失败"):
    """Mark the clip step of subtask *uuid* as failed.

    Sets status=-1, process=100, etime=now() and stores *info* as the reason.
    """
    # Escape single quotes so a failure message containing ' cannot break
    # (or inject into) the statement — dbExcute takes raw SQL strings.
    # NOTE(review): prefer parameterized queries if dbExcute supports them.
    safe_info = str(info).replace("'", "''")
    upsql = f"update subtask set status = -1, etime=now(), reason='{safe_info}', process=100 where uuid1='{uuid}'"
    # Dropped the original pointless `if not updateData(...): return` guard.
    dbExcute.updateData(upsql)

# Module-level side effect at import time: start a background scheduler that
# runs CeleryTaskScheduler every 60 seconds.
# NOTE(review): the file header claims a 1-second interval — the code uses 60 s.
scheduler_instance = scheduler.BackgroundScheduler()
scheduler_instance.add_job(func=CeleryTaskScheduler, trigger='interval', seconds=60)
scheduler_instance.start()