
# coding: utf-8
#
import asyncio
import json
import logging
import random
import logzero
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from logzero import logger
from datetime import datetime, timedelta
from threading import RLock
from tornado.web import HTTPError
from apscheduler.events import (EVENT_ALL, EVENT_JOB_ADDED, EVENT_JOB_ERROR,
                                EVENT_SCHEDULER_PAUSED,
                                EVENT_SCHEDULER_RESUMED,
                                EVENT_SCHEDULER_SHUTDOWN,
                                EVENT_SCHEDULER_STARTED)
from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.jobstores import rethinkdb
from apscheduler.jobstores.rethinkdb import RethinkDBJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from . settings import AXT_URL, server_setting
from .enums import JobStatus, TaskStatus
from .tasks import TaskBase, TaskFactory

# logzero.loglevel(logging.DEBUG)

class Job(object):
  """
    Represents a schedulable job assigned to a single device.

    Two Job instances compare equal when their job ids are equal, so a Job
    can be used as a dict key or set member.
  """
  def __init__(self, job_id, device_serial, app_name, job_type, task_load: int, plan_datetime: datetime = datetime.min, operator_data: dict = None, task_generating_pattern: int = 24) -> None:
    """
      :param job_id: unique identifier of the job.
      :param device_serial: serial (udid) of the target device.
      :param app_name: name of the app the job runs against.
      :param job_type: kind of work to perform (e.g. 'adfe').
      :param task_load: amount of work units to execute.
      :param plan_datetime: planned start time (defaults to datetime.min).
      :param operator_data: data or checkpoint state needed for execution.
      :param task_generating_pattern: length of the task-generating window, in hours.
    """
    self._job_id = job_id
    # Target device of this job.
    self._device_serial = device_serial
    # The app this job operates on.
    self._app_name = app_name
    # The kind of work this job performs.
    self._job_type = job_type
    # Planned start time.
    self._plan_datetime = plan_datetime
    # Amount of work.
    self._task_load = task_load
    self._task_generating_pattern = task_generating_pattern
    # Data or checkpoint state required to execute the job.
    self._operator_data = operator_data
    self._status = JobStatus.Unkown
    # Fix: the agent_id property used to raise AttributeError because this
    # attribute was never assigned anywhere in this class; it appears to be
    # populated externally (if at all) — default it to None.
    self._agent_id = None

  def __eq__(self, other):
    """Jobs are equal when their job ids match."""
    if not isinstance(other, Job):
      return NotImplemented
    return self._job_id == other.job_id

  def __ne__(self, other):
    """Jobs differ when their job ids differ."""
    if not isinstance(other, Job):
      return NotImplemented
    return self._job_id != other.job_id

  def __hash__(self) -> int:
    # Must agree with __eq__, which only looks at the job id.
    return hash(self._job_id)

  @property
  def job_id(self) -> str:
    """The unique job id."""
    return self._job_id

  @property
  def app_name(self) -> str:
    """Name of the app the job runs against."""
    return self._app_name

  @property
  def device_serial(self) -> str:
    """Serial (udid) of the target device."""
    return self._device_serial

  @property
  def agent_id(self) -> str:
    """Id of the controlling agent, or None until one is assigned."""
    return self._agent_id

  @property
  def job_type(self) -> str:
    """The kind of work this job performs."""
    return self._job_type

  @property
  def plan_datetime(self) -> datetime:
    """Planned start time of the job."""
    return self._plan_datetime

  @property
  def task_load(self) -> int:
    """Amount of work units to execute."""
    return self._task_load

  @property
  def operator_data(self) -> dict:
    # Fix: annotation was "-> dict()" (a call producing an empty dict
    # instance), not the dict type.
    """Data or checkpoint state required to execute the job."""
    return self._operator_data

  @property
  def status(self) -> "JobStatus":
    """Current job status (string annotation: JobStatus is a project enum)."""
    return self._status

class JobManager(object):
  """
    Manages jobs received from external callers: accepting jobs, keeping
    them scheduled, reporting task status back to the server and cleaning
    up finished or aborted jobs.

    Thread-safety: mutations of the internal job table are guarded by a
    reentrant lock because jobs arrive concurrently from server requests
    while scheduler worker threads fire task callbacks.
  """
  def __init__(self, test_only: bool = False) -> None:
    # job_id -> (Job, [tasks], [scheduler jobs], {"completeCount": int})
    self._job_list = dict()
    self._scheduler = self._initial_scheudler()
    self._job_locker = self._create_lock()
    self._task_factory = TaskFactory()
    # When True, task delays are generated in seconds instead of minutes
    # so tests run quickly.
    self._test_only = test_only

  @property
  def jobs(self):
    """The internal job table (job_id -> job tuple)."""
    return self._job_list

  def _initial_scheudler(self):
    """
      Build the background scheduler.
      (NOTE: method name typo kept for backward compatibility.)
    """
    # TODO: using a persistent job store requires solving the pickling
    # problem "Can't pickle local object
    # 'BaseExecutor.__init__.<locals>.<lambda>'", which is caused by the
    # nested objects held by tasks. Until then run with in-memory jobs only.
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    return BackgroundScheduler(executors=executors)

  def initial(self) -> None:
    """Attach the event listener and start the scheduler."""
    self._scheduler.add_listener(self._scheduler_job, EVENT_ALL)
    # Start the scheduler so new jobs can be accepted through try_accept.
    self._scheduler.start()
    logger.info("Scheduler已经启动")

  def try_accept(self, job: Job, task_finish_callback=None) -> bool:
    """
      Try to accept an assigned job.

      Returns False when the job is a duplicate; otherwise splits the job
      into tasks, schedules each one and returns True.
    """
    # Reject duplicated jobs up front.
    if not self._check_job_ok(job):
      return False

    # The lock guarantees thread safety when jobs arrive from the server
    # concurrently.
    with self._job_locker:
      tasks = self._create_tasks(job)
      s_jobs = list()
      for task in tasks:
        scheduler_job_id = task.get_sjob_id()
        if self._scheduler.get_job(scheduler_job_id) is None:
          # add_job itself is a thread-safe operation.
          s_job = self._scheduler.add_job(task.execute, "date", run_date=task.plan_datetime, id=scheduler_job_id)
          s_jobs.append(s_job)
          logger.info("Task: [%s], 已经规划运行时间: %s", scheduler_job_id, task.plan_datetime)
        else:
          logger.warning("Task: [%s], 已经在计划中了", scheduler_job_id)

      self._job_list[job.job_id] = (job, tasks, s_jobs, {"completeCount": 0})
      # For debugging
      logger.debug(self._scheduler.get_jobs())
    return True

  async def clear_jobs(self, job_id) -> None:
    """
      Remove every scheduled task belonging to *job_id* and drop the job
      from the internal table. For 'adfe' jobs, the phone numbers assigned
      to the job are restored on the server afterwards.

      (Fix: was annotated "-> bool" but never returned a value.)
    """
    phones = []
    if job_id in self._job_list:
      job = self._job_list[job_id][0]
      # Only 'adfe' jobs need their phone data restored.
      if job.job_type == 'adfe':
        phones = job.operator_data['cellphones']
      # Remove every still-pending scheduler entry of this job.
      tasks = self._job_list[job_id][1]
      for itask in tasks:
        sjob_id = itask.get_sjob_id()
        if self._scheduler.get_job(sjob_id) is not None:
          self._scheduler.remove_job(sjob_id)
      del self._job_list[job_id]
      if len(phones) > 0:
        await self._restore_status(phones)

    logger.info("任务 %s 已调度的任务序列已经清理。", job_id)

  async def _restore_status(self, phones):
    """POST the given phone list to the server to restore their status."""
    http_client = AsyncHTTPClient()
    url = server_setting.server_url + "/api/v1/job/restorestatus"
    headers = {'Content-Type': 'application/json'}
    json_data = json.dumps({'phones': phones})
    try:
      request = HTTPRequest(url, method="POST", body=json_data, headers=headers)
      await http_client.fetch(request)
    except HTTPError as e:
      # Fix: logger.warn is a deprecated alias of logger.warning.
      logger.warning("恢复数据状态更新状态失败。错误信息：%s", e)

  def upate_task_callback(self, task: TaskBase):
    '''
      Post-processing executed after a scheduled task finishes.
      (NOTE: method name typo — "upate" — kept for backward compatibility,
      since it is handed out as a callback when tasks are created.)
    '''
    # Build a status report for this task.
    status = self._task_factory.create_task_status(task)
    with self._job_locker:
      job = None
      # Friend-adding hit a rate limit: abort the remaining tasks and
      # restore the assigned phones back to 'unused'.
      if task.status == TaskStatus.Task_Limitation:
        if task.job_id in self._job_list:
          job = self._job_list[task.job_id][0]
          status['completedTaskLoad'] = job.task_load
          asyncio.run(self.clear_jobs(task.job_id))
        # NOTE(review): the status built above is never reported to the
        # server in this branch — confirm that is intentional.
        return

      # The device finished the whole job: report completion, then remove
      # every leftover task and delete the job record.
      if task.status == TaskStatus.Device_Completed:
        if task.job_id in self._job_list:
          job = self._job_list[task.job_id][0]
          status['completedTaskLoad'] = job.task_load
          self._update_status_service(status)
          # Also clean up any tasks that survived an unexpected failure.
          asyncio.run(self.clear_jobs(task.job_id))
          logger.info("Task序列 [ %s ], 已经执行完毕", task.job_id)
      else:
        sjob_id = task.get_sjob_id()
        # Remove the corresponding scheduler entry, if still present.
        if self._scheduler.get_job(sjob_id) is not None:
          self._scheduler.remove_job(sjob_id)
        # Drop the task from the job cache and hand its state to the next task.
        if task.job_id in self._job_list:
          job = self._job_list[task.job_id][0]
          tasks = self._job_list[task.job_id][1]
          self._job_list[task.job_id][3]['completeCount'] += 1
          if len(tasks) > 1 and task in tasks:
            if task.next is not None:
              # Pass this task's execution state on to the next task.
              task.next.operator_data = task.operator_data
              # Pass this task's cached data on to the next task.
              task.next.cached_data = task.cached_data
              # Fix: use the public cached_data property instead of the
              # private _cached_data attribute.
              logger.debug("operator_data: %s and cached_data: %s", task.operator_data, task.cached_data)
            tasks.remove(task)
            logger.info("Task: %s, 清理移除", sjob_id)
            status['completedTaskLoad'] = self._job_list[task.job_id][3]['completeCount']
          else:
            # Last remaining task: drop the whole job record.
            status['completedTaskLoad'] = self._job_list[task.job_id][3]['completeCount']
            del self._job_list[task.job_id]
            logger.info("Task: %s, 清理Job", sjob_id)
        # Report progress for the normal completion path.
        self._update_status_service(status)
        logger.info("Task [ %s ], 已经执行完毕", task.get_sjob_id())

    if task.status == TaskStatus.Task_Completed_Error:
      # TODO: report the failed task status to the service.
      pass

    if (job is not None) and (task.next is not None):
      logger.info("Next Task: 将在设备%s执行, [ %s ]-[ %s ]-[ %s ]",
                  task.next.device_serial, job.app_name, task.next.job_id, task.next.plan_datetime)

  def _update_status_service(self, status: dict):
    '''
      Synchronous wrapper that reports a task status dict to the server.
      Runs the awaitable reporter on a fresh event loop because callers
      execute on scheduler worker threads.
    '''
    asyncio.run(self._update_status_service_async(status))

  async def _update_status_service_async(self, status: dict):
    """POST the mapped task status to the server's job-status endpoint."""
    http_client = AsyncHTTPClient()
    url = server_setting.server_url + "/api/v1/job/status"
    headers = {'Content-Type': 'application/json'}
    # map_task_status converts the raw status according to its task type.
    json_data = json.dumps(self._task_factory.map_task_status(status))
    try:
      request = HTTPRequest(url, method="POST", body=json_data, headers=headers)
      await http_client.fetch(request)
    except HTTPError as e:
      # Fix: logger.warn is a deprecated alias of logger.warning.
      logger.warning("任务 %s 更新状态失败。错误信息：%s", status['jobId'], e)

  def _check_job_ok(self, job: Job) -> bool:
    """Return False (and warn) when *job* duplicates an accepted job."""
    result = True
    if job.job_id in self._job_list:
      result = False
      logger.warning("针对设备 device_serial: [%s] 在 plan_datetime: [%s] 上已经存在一个计划", job.device_serial, job.plan_datetime)
    return result

  def _create_tasks(self, job: Job) -> list:
    """
      Split *job* into a chain of tasks spread pseudo-randomly over the
      job's generating window, sorted by planned run time and linked as a
      singly linked list via task.next.
    """
    lst = list()
    sortedlst = list()
    if job.task_load > 0:
      total_minutes = 0
      computing_task_load = job.task_load - 1
      delay = 1  # minutes reserved before the first task
      last_executing_time = 5  # minutes reserved for the last task
      available_mins = job._task_generating_pattern * 60 - delay - last_executing_time
      # Minimum spacing between tasks; fixed at 30 for tests.
      minum = available_mins / job.task_load if not self._test_only else 30
      first_run = True
      for minutes in self._timedelta_generator(computing_task_load, available_mins, minum):
        total_minutes += minutes
        if first_run:
          # The first task needs no random delay: it runs after one minute.
          task = self._task_factory.create(job.app_name, job.job_type, job.job_id, job.device_serial, job.operator_data, self.upate_task_callback)
          task.plan_datetime = job.plan_datetime + timedelta(minutes=1)
          lst.append(task)
          first_run = False

        task = self._task_factory.create(job.app_name, job.job_type, job.job_id, job.device_serial, job.operator_data, self.upate_task_callback)
        if self._test_only:
          # Compressed schedule for tests: seconds instead of minutes.
          task.plan_datetime = job.plan_datetime + timedelta(seconds=total_minutes)
        else:
          task.plan_datetime = job.plan_datetime + timedelta(minutes=total_minutes)
        lst.append(task)

      # Sort by planned time so the chain matches the scheduler's order.
      sortedlst = sorted(lst, key=lambda p: p.plan_datetime)

      # Link the sorted tasks into a singly linked list.
      previous = None
      for task in sortedlst:
        if previous is not None:
          previous.next = task
        previous = task
    return sortedlst

  def _timedelta_generator(self, task_num, total_minutes, min_minutes: int = 10):
    """
      Yield *task_num* random delays (in minutes) that roughly add up to
      *total_minutes*, each at least *min_minutes*.

      Fix: the final yield is clamped to *min_minutes* instead of going
      negative when the per-task minimum has exhausted the budget (the
      original implementation could yield a negative delay, scheduling a
      task in the past). The parameter also no longer shadows builtin min.
    """
    task_remain = int(task_num)
    for _ in range(int(task_num) - 1):
      # Allow up to twice the fair share so the spacing looks random.
      max_minute = total_minutes / task_remain * 2
      minute = int(round(random.uniform(0, max_minute), 2))
      task_minute = minute if minute > min_minutes else min_minutes
      task_remain -= 1
      total_minutes -= task_minute
      yield task_minute
    # Whatever budget remains goes to the last task — never negative.
    yield max(int(total_minutes), min_minutes)

  def _scheduler_job(self, event):
    '''
      Scheduler event listener: surfaces job errors and logs lifecycle
      events. (Fix: was print()ing to stdout; routed through the logger.)
    '''
    if event.code == EVENT_JOB_ERROR:
      # Exceptions raised inside tasks are otherwise swallowed by the
      # executor, so log them explicitly.
      logger.error(event.exception)
    elif event.code in (EVENT_JOB_ADDED, EVENT_SCHEDULER_STARTED,
                        EVENT_SCHEDULER_SHUTDOWN, EVENT_SCHEDULER_PAUSED,
                        EVENT_SCHEDULER_RESUMED):
      logger.debug(event)

  def _create_lock(self):
    """Creates a reentrant lock object."""
    return RLock()

# Module-level singleton: importing this module constructs the manager and
# immediately starts its background scheduler (an import-time side effect).
jobManager = JobManager(False)
jobManager.initial()
