#!/usr/bin/env python3
# encoding:utf-8
# This program periodically polls the MySQL database for pending jobs and
# processes them; multiple jobs are handled in parallel, with a per-server
# thread cap.  (The original comment claimed a 100 ms poll interval; the loop
# below actually sleeps 0.5 s between polls.)
import _thread
from gettext import Catalog
from multiprocessing import Lock
import time,threading
import datetime
import json, requests
import uuid
import os,sys
import shutil
import json
import subprocess
import tarfile
os.path.join(os.path.dirname(__file__), '../')
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from public import pmysql
from image_compare import compareImg
from video_prepare import prepare
from ai_predict import imgCategory
import logging
from copy import deepcopy

upload_url = "http://43.138.70.166/upload_zswdsyfwq_012345678"
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(console)

#设定最大线程数，默认为3
maxpool = 1
#任务线程的执行计数器
pool = 0
#回调线程的执行计数器
cbpool = 0 

lock = threading.Lock()
pm = pmysql.pmy()
imgcategory = imgCategory()
comp = compareImg()

#运行任务，检查数据库中的记录，如果有未处理的任务，则启动线程进行处理；
def run_task():

   while True:

      if(pool < maxpool):
         puuid = uuid.uuid1()
         sql = "update task set status = 'work', etime = now(), try=try+1, workid =%s where status = 'new' limit 1" 
         pm.update(sql, (puuid,))
         sql = "select * from task where workid = %s" 
         result1 = pm.get_one(sql, (puuid,))
         if(result1 != None):
            threading.Thread(target=run_thread, args=(result1[0], result1[1], result1[3], result1[10],)).start()

      if(cbpool < maxpool):
         cbuuid = uuid.uuid1()
         sql = "update callback set status = 'work', etime = now(), try=try+1, workid =%s where status = 'new' limit 1" 
         pm.update(sql, (cbuuid,))
         sql = "select * from callback where workid = %s" 
         result2 = pm.get_one(sql, (cbuuid,))
         if(result2 != None):
            threading.Thread(target=run_callback_thread, args=(result2[0], result2[1], result2[2], result2[3], result2[9],)).start()

      task_manage_thread()
      time.sleep(0.5)

#设置任务的运行状态
def set_task_status(tid, status, info):

   sql = "update task set status = %s, etime = now(), info = %s where tid = %s" 
   pm.update(sql, (status, info, tid))

#设置callback任务的运行状态
def set_callback_task_status(cid, status, info):

   sql = "update callback set status = %s, etime = now(), info = %s where cid = %s" 
   pm.update(sql, (status, info, cid))

#生成回调的任务, 任务编号，回调，结果列表;
def mk_callback_task(tid, callback, rlist, udid):

   puuid = uuid.uuid1()
   sql = "insert into callback (cid, tid, callback, content, ctime, status, udid) values ( %s, %s, %s, %s, now(), 'new', %s)" 
   res = pm.update(sql, (puuid, tid, callback, rlist, udid,))
   if res != 1 :
      raise Exception("增加回调任务错误")

# 为线程定义一个函数
def run_thread(tid, surl, callback, udid):

   ptid = tid
   wpath = "/tmp/newspower/"
   wpath1 = wpath+ptid
   wpath2 = wpath1+"/key"
   global pool
   print("thread is start :thread_pool is"+str(pool))
   lock.acquire()
   pool = pool + 1
   lock.release()
   kf = prepare()
   try:
      #进行视频下载、关键帧截取
      tpath, tfile = kf.download(wpath, surl, ptid)
      kf.qkframe(tpath, tfile)
      comp.keyCompare(wpath1)
      list1 = imgcategory.pathPredict(wpath2)
      list3 = mkResult(list1)
      mk_callback_task(tid, callback, list3, udid)
      set_task_status(tid, "end", "OK")
      #把结果回传给服务器
      upload_to(wpath2+"/category", ptid)
   except Exception as e:
      set_task_status(tid, "fault", str(e))
      logger.error(e)

   finally:
      #需要删除目录;
      rmDWfile(wpath1)
      lock.acquire()
      pool = pool - 1
      lock.release()
      print("thread is end :thread_pool is"+str(pool))

# 运行回调处理线程定义的一个函数
def run_callback_thread(cid, tid, callback, content, udid):

   global cbpool
   print("thread is start :run_callback_thread is"+str(cbpool))
   lock.acquire()
   cbpool = cbpool + 1
   lock.release()
   try:
#定义要提交的数据
      retdata = '{"tid":"'+tid+'", "cid":"'+cid+'", "callback":"'+callback+'", "udid":"'+udid+'","result":"'+content+'", "time":"'+str(datetime.datetime.now())+'"}'
      requests.post(callback, json=retdata)
      set_callback_task_status(cid, "end", "OK")  
      return True   
   except Exception as e:
      set_callback_task_status(cid, "fault", str(e))
      logger.error(e)

   finally:
      lock.acquire()
      cbpool = cbpool - 1
      lock.release()
      print("thread is end :thread_cbpool is"+str(pool))

#分析列表的内容为一个json的结果;
def mkResult(list1):

   r_array = []

   lst = {}
   for l in list1:
      p = l[:-4].split("_")
      category = p[4]
      seq = p[1]
      pts = p[2]
      rjson = {}
      rjson["seq"] = seq
      rjson["pts"] = pts
      rjson["category"] = category
      rjson["property"] = "key"
      #判断该帧是否为应该打点的内容；
      if(not lst):
         lst = deepcopy(rjson)
      if(category == "anchor" or category == "boundary"):
         #如果是主持人或者边界的点，则可以初步认为其是一个打点的位置
         rjson["property"] = "point"
         #如果该点的分类与前一个点的分类相同，且在20帧之内，则认为其是同一个打点位置,则恢复其关键帧的信息;
         if (lst["category"] == category):
            seq1 = int(seq)
            seq2 = int(lst["seq"]) + 20
            if (seq2 > seq1):
               rjson["property"] = "key"
            else:
               lst = deepcopy(rjson)
      r_array.append(rjson)
   
   return  json.dumps(r_array, ensure_ascii=False)

#回传结果给服务器
def upload_to(file_path, tid):
   tarfile ="/tmp/"+tid+".tar"
   try:
      compress_file(tarfile, file_path)
      #调用shell执行命令行回传到服务器上
      subprocess.Popen("/usr/bin/upload.sh "+tarfile, shell=True)
   except Exception as e:
      logger.error(e)

def compress_file(tarfilename, dirname):    # tarfilename是压缩包名字，dirname是要打包的目录
    if os.path.isfile(dirname):
        with tarfile.open(tarfilename, 'w') as tar:
            tar.add(dirname)
    else:
        with tarfile.open(tarfilename, 'w') as tar:
            for root, dirs, files in os.walk(dirname):
                for single_file in files:
                    # if single_file != tarfilename:
                    filepath = os.path.join(root, single_file)
                    tar.add(filepath)


#删除目录
def rmDWfile(file_path):
   shutil.rmtree(file_path)
   return

#任务管理线程，对于当前失败的任务进行处理，每1分钟一次，如果尝试三次都不能够处理，则置为失败,
#状态说明：状态1、new 2、fault、3、work 4, end 9、over 其中over就是失败后不处理的任务，正常任务结束的标志是end
def task_manage_thread():

   #把不够三次的失败任务置为可再次尝试的状态
   sql = "update task set status = 'new' where try < 4 and status = 'fault' and now() > SUBDATE(etime, interval -1 minute)"
   pm.update(sql, ())
   #把不超三次的失败任务置为永久失败的状态
   sql = "update task set status = 'over' where try > 3 and status = 'fault'"
   pm.update(sql, ())
   sql = "update callback set status = 'new' where try < 4 and status = 'fault' and now() > SUBDATE(etime, interval -1 minute)"
   pm.update(sql, ())
   #把不超三次的失败任务置为永久失败的状态
   sql = "update callback set status = 'over' where try > 3 and status = 'fault'"
   pm.update(sql, ())

if __name__ == "__main__":
   run_task()
