# encoding=utf-8

"""FlaskPredictWaitTaskQueue.py
    - 使用简单的队列完成任务的存储工作
    - 入队的同时，完成redis信息的写入
"""
import threading
from concurrent.futures.thread import ThreadPoolExecutor

from seg_system.application_config import ApplicationConfig
from seg_system.LongTaskStatusUpload import MultiStatusUploadImpl
from seg_system.Multibatch import BatchPredictService
from seg_system.Vascular import VascularService

from seg_common.TaskQueue.ITaskQueue import ITaskQueue
from seg_common.LoggingService import ConsoleService
from seg_common.Exception import ExceptionCallBack
from seg_middleware.Redis.RedisServcie import RedisService


class FlaskPredictWaitTaskQueue(ITaskQueue):
    """Simple list-backed wait queue for prediction tasks.

    Tasks (dicts) are stored FIFO in an in-memory list; every enqueue and
    every dispatch also publishes each waiting task's queue position to
    Redis so clients can poll their place in line.  A single-worker
    ThreadPoolExecutor guarantees at most one prediction runs at a time;
    when a run finishes, its done-callback re-invokes the dispatcher to
    start the next queued task.
    """

    def __init__(self, **kwargs):
        super(FlaskPredictWaitTaskQueue, self).__init__(**kwargs)
        # Task storage (FIFO list of task dicts).
        self.task_queue = self.make_storage()

        # Services used to publish queue position / task progress.
        self.task_queue_uploader = RedisService()
        self.task_state_upload_service = MultiStatusUploadImpl()

        # Execution state: one worker thread, at most one active future.
        self.future_obj = None
        self.executor = ThreadPoolExecutor(max_workers=1)
        # Maps id(future) -> redis_id so the done-callback can report
        # failures for the correct task.
        self.future_thread_dict = {}

        # Predictors that actually perform the work.
        self.batch_predict_service = BatchPredictService()
        self.vascular_predict_service = VascularService()

        # Logger (currently unused).
        # self.logger = ConsoleService.get_logger()

        # Serializes the dispatch decision in invoke().
        self.lock = threading.Lock()

    def make_storage(self, **kwargs):
        """Create the underlying task container (a plain FIFO list)."""
        return []

    def task_in(self, o, **kwargs):
        """Enqueue task dict *o*, publish its queue position, then try to dispatch."""
        self.task_queue.append(o)
        self.update_each_queue_info(o)
        self.invoke()

    def task_out(self, **kwargs):
        """Pop and return the oldest task, or None when the queue is empty."""
        if not self.task_queue:
            return None
        return self.task_queue.pop(0)

    def invoke(self, **kwargs):
        """Dispatch the next task if no prediction is currently running.

        Guarded by self.lock so concurrent calls (from task_in and from the
        worker's done-callback) cannot start two predictions at once.
        """
        # BUGFIX: use `with` so the lock is released even if
        # update_queue_info()/task_out()/submit() raises; the previous
        # manual acquire/release would deadlock the queue forever on error.
        with self.lock:
            if self.future_obj is not None and not self.future_obj.done():
                # A prediction is still running; its callback will re-invoke.
                return

            if not self.task_queue:
                return

            self.update_queue_info()  # refresh every waiting task's position
            task_dict = self.task_out()

            self.task_state_upload_service.update_seq(task_dict['redis_id'])
            future = self.executor.submit(
                self.batch_predict_service.predict_start,
                task_dict['saver'], "",
                task_dict['uName'],
                task_dict['token'],
                task_dict['config'],
                vascular_saver=task_dict['vascular_saver'],
                vascular_predictor=self.vascular_predict_service,
                redis_id=task_dict['redis_id']
            )
            self.future_thread_dict[id(future)] = task_dict['redis_id']
            self.future_obj = future
            future.add_done_callback(self.thread_callback)

    def update_queue_info(self):
        """Move every waiting task one slot forward in its published Redis position."""
        for each_task in self.task_queue:
            key = ApplicationConfig.SystemConfig.REDIS_TASK_QUEUE_PRE + each_task['redis_id']
            value = self.task_queue_uploader.key_get(key)
            self.update_each_queue_info(each_task, value['location'] - 1 if value['location'] > 0 else 0)

    def update_each_queue_info(self, task_info: dict, location: int = -1):
        """Publish *task_info*'s queue position to Redis.

        location == -1 (the default) means "end of the queue", i.e. the
        current queue length; any other value is written as-is.
        """
        key = ApplicationConfig.SystemConfig.REDIS_TASK_QUEUE_PRE + task_info['redis_id']
        value = {
            'location': len(self.task_queue) if location == -1 else location
        }

        self.task_queue_uploader.key_set_dict(key, value)

    def thread_callback(self, worker):
        """Done-callback for a prediction future: report failure, start the next task."""
        # BUGFIX: pop (not just read) the mapping entry so future_thread_dict
        # does not grow without bound across tasks; also avoids a KeyError if
        # the entry is unexpectedly missing.
        redis_id = self.future_thread_dict.pop(id(worker), None)

        worker_exception = worker.exception()
        if worker_exception:
            msg = 'Worker error occur: {}'.format(worker_exception)
            ConsoleService.console_log(msg, ConsoleService.EXCEPTION)

            if redis_id is not None:
                self.task_state_upload_service.update_total(redis_id, has_finished=True)
                self.task_state_upload_service.update_state(redis_id, state=msg)

        ConsoleService.console_log("v1 predict callback occur", ConsoleService.INFO)

        self.invoke()  # trigger the next dispatch round

