#encoding:utf-8
from __future__ import unicode_literals
from __future__ import print_function

import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket

from tornado.options import define, options
import os
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.cron import CronTrigger
from apscheduler.events import EVENT_ALL, JobEvent, SchedulerEvent,\
    EVENT_SCHEDULER_START, EVENT_JOB_ERROR,\
    EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED, EVENT_JOB_ADDED, EVENT_JOB_EXECUTED,\
    EVENT_JOB_MISSED
import logging
import sys
import json
from apscheduler.jobstores.base import JobLookupError
import redis
import MySQLdb
from tornado.process import Subprocess
from scrapy.utils.project import get_project_settings
from collections import namedtuple

# CLI option: --port=<int>, the HTTP listen port (default 8888).
define("port", default=8888, help="run on the given port", type=int)


class AddJobHandler(tornado.web.RequestHandler):
    """Create or replace a scheduled job.

    POST parameters:
        name:     job type, 'INTERVAL' or 'DAY'
        schedule: five space-separated fields:
                  "minute hour day limit_day exceed_day"
        _id:      id of an existing job to replace (may be empty)

    Responds with ``{"jobid": ...}`` on success, "delete job" when a
    non-positive minute removes the job, or HTTP 400 on bad input.
    """

    def post(self):
        crontname = self.get_argument('name')
        sched = self.get_argument('schedule')
        _id = self.get_argument('_id')
        try:
            # BUG FIX: the unpacking used to sit outside the try, so a
            # schedule string with the wrong field count raised an
            # unhandled ValueError (HTTP 500) instead of a 400.
            minute, hour, day, limit_day, exceed_day = sched.split(' ')
            if crontname == 'INTERVAL':
                minute = int(minute)
                if minute <= 0:
                    # Non-positive interval means "remove the job".
                    if _id:
                        scheduler.remove_job(_id)
                    return self.write("delete job")
                trigger = IntervalTrigger(seconds=minute * 60)
            elif crontname == 'DAY':
                minute = int(minute)
                if minute < 0:
                    if _id:
                        scheduler.remove_job(_id)
                    return self.write("delete job")
                trigger = CronTrigger(hour=hour,
                                      minute=minute)
            else:
                self.set_status(400)
                return self.write(json.dumps({'msg': 'unknown jobid none.'}))
        except ValueError as e:
            self.set_status(400)
            return self.write("value error %s" % e)
        # Parameters forwarded verbatim to job_func at execution time.
        kwargs = {
            'minute': minute,
            'hour': hour,
            'interval_day': day,
            'limit_day': limit_day,
            'exceed_day': exceed_day,
            'crontname': crontname,
        }
        if _id:
            # Replace semantics: drop the old job, then register a new one.
            scheduler.remove_job(_id)

        job = scheduler.add_job(job_func, trigger=trigger,
                                name=crontname, kwargs=kwargs)

        return self.write(json.dumps({'jobid': job.id}))
        
# def job_interval():
#     cur.execetu('SELECT number FROM SBJ_Crawlerresult WHERE datetime_column < DATE_SUB(NOW(), INTERVAL 30 MINUTE)')
#     print("hello")
    
def job_func(**kwargs):
    """Scheduled job body: push phone numbers from MySQL into Redis queues.

    kwargs (set by AddJobHandler):
        crontname:    'INTERVAL' or 'DAY' — selects which branch runs
        minute, hour: schedule fields (minute is an int in INTERVAL mode)
        interval_day, limit_day, exceed_day: DAY-mode re-query windows

    Reads module-level DB credentials (host/port/user/passwd/db) and the
    shared ``redis_server``/``logger`` globals set up under __main__.
    """
    con = MySQLdb.connect(host=host, port=port, user=user, passwd=passwd, db=db)
    con.autocommit(True)
    try:
        cur = con.cursor()
        crontname = kwargs['crontname']
        if crontname == 'INTERVAL':
            minute = kwargs['minute']
            if int(minute) > 0:
                # Look back one scheduling period plus an hour of slack.
                interval = int(minute) + 60
                # Parameterized instead of %-interpolated SQL.
                count = cur.execute(
                    'SELECT ANO FROM SBJ_Crawlerresult'
                    ' WHERE FirstRecordTime > DATE_SUB(NOW(), INTERVAL %s MINUTE)',
                    (interval,))
                logger.info("%d 分钟内新增号码 %d" % (minute, count))
                for number in cur:
                    # Remove any existing copy before re-queueing (de-dup).
                    redis_server.lrem('haoma', 0, number[0])
                    redis_server.lpush('haoma', number[0])
        elif crontname == 'DAY':
            day = kwargs['interval_day']
            limit_day = kwargs['limit_day']
            exceed_day = kwargs['exceed_day']
            # Numbers never searched yet: route into priority sets by ACL.
            # BUG FIX: original SQL had a stray comma before FROM
            # ("SELECT ANO, ACL, CALLSUM, FROM ..."), a guaranteed syntax error.
            count = cur.execute(
                "SELECT ANO, ACL, CALLSUM FROM SBJ_Crawlerresult"
                " WHERE search_time is null")
            logger.info("未查询号码%s" % count)
            for number in cur:
                acl = int(number[1])
                callsum = int(number[2])
                # ACL 1-3 go to the high-priority set, everything else to acl_2.
                if acl in (1, 2, 3):
                    redis_server.zadd('acl_1', callsum, number[0])
                else:
                    redis_server.zadd('acl_2', callsum, number[0])

            # Re-query: searched exactly `day` days ago AND first recorded
            # within the last `limit_day` days, still actively monitored.
            if int(limit_day) > 0:
                count = cur.execute("""
                    SELECT ANO FROM SBJ_Crawlerresult
                    WHERE DATE(search_time) = DATE(NOW() - INTERVAL %s DAY) and FirstRecordTime > NOW() - INTERVAL %s DAY and MonActive=1
                """, (day, limit_day))
                logger.info("%s天内 间隔  %s天重复查询的号码 %s" % (limit_day, day, count))
                for number in cur:
                    redis_server.lrem('haoma', 0, number[0])
                    redis_server.lpush('haoma', number[0])

            # Stale: last searched more than `exceed_day` days ago and still monitored.
            if int(exceed_day) > 0:
                # BUG FIX: execute() params must be a sequence, not a bare value.
                count = cur.execute("""
                    SELECT ANO FROM SBJ_Crawlerresult
                    WHERE search_time < NOW() - INTERVAL %s DAY and MonActive=1
                """, (exceed_day,))
                logger.info("查询间隔大于%s 天 且monacative=1 %s" % (exceed_day, count))
                for number in cur:
                    redis_server.lrem('haoma', 0, number[0])
                    redis_server.lpush('haoma', number[0])
    finally:
        # BUG FIX: the original closed the connection only inside the DAY
        # branch, leaking a MySQL connection on every INTERVAL run.
        con.close()
        

class DelJobHandler(tornado.web.RequestHandler):
    """Remove a scheduled job by its id (idempotent: missing job == success)."""

    def post(self):
        # BUG FIX: get_argument returns a string, not a list — the original
        # `jobids[0]` sent only the FIRST CHARACTER of the id to the scheduler.
        jobid = self.get_argument('jobid')
        if not jobid:
            logger.warn('删除时未指定jobid')
            self.set_status(400)
            return self.write(json.dumps({'msg': 'unknown jobid none.'}))
        try:
            scheduler.remove_job(jobid)
        except JobLookupError:
            # Already gone — treat the delete as successful.
            logger.warn('删除已不存在的任务，默认成功')
        # BUG FIX: the original `return json.dumps(...)` never wrote the
        # response body; Tornado ignores a handler's return value.
        return self.write(json.dumps({'msg': 'ok'}))

        
class MainHandler(tornado.web.RequestHandler):
    """Render the index page showing the current INTERVAL and DAY jobs.

    Missing jobs are represented by placeholder namedtuples with zeroed
    kwargs so the template can always access the same fields.
    """

    def get(self):
        Job = namedtuple("JOB", ['next_run_time', 'id', 'kwargs'])
        empty_kwargs = json.dumps({
            'minute': 0,
            'hour': 0,
            'interval_day': 0,
            'limit_day': 0,
            'exceed_day': 0,
            'crontname': 0,
        })
        # Placeholders used when no matching job exists yet.
        job1 = Job("", "", empty_kwargs)
        job2 = Job("", "", empty_kwargs)
        for job in scheduler.get_jobs():
            # Serialize kwargs so the template can embed them as JSON.
            job.kwargs = json.dumps(job.kwargs)
            logger.info(job)
            if job.name == 'INTERVAL':
                job1 = job
            elif job.name == 'DAY':
                job2 = job
        self.render('index.html', job1=job1, job2=job2)
        

def ev_listener(ev):
    """Log every scheduler/job event delivered by APScheduler (EVENT_ALL)."""
    if isinstance(ev, JobEvent):
        logger.info('JobEvent, code: [%d]', ev.code)
        if ev.code == EVENT_JOB_REMOVED:
            # Removal is the only event that logs the job id.
            logger.info('任务已被移除 [%s]', ev.job_id)
        else:
            messages = {
                EVENT_JOB_ADDED: '任务已添加',
                EVENT_JOB_MODIFIED: '任务被修改',
                EVENT_JOB_EXECUTED: '任务被执行',
                EVENT_JOB_ERROR: '任务执行出错',
                EVENT_JOB_MISSED: '任务已错过',
            }
            if ev.code in messages:
                logger.info(messages[ev.code])
    elif isinstance(ev, SchedulerEvent):
        if ev.code == EVENT_SCHEDULER_START:
            logger.info('调度器启动成功')

      
def set_scheduler():
    """Build, start and return the global TornadoScheduler.

    Jobs are persisted in a local SQLite jobstore so they survive restarts;
    a 20-thread pool executes them.
    """
    db_url = 'sqlite:///./db.sqlite3'

    logger.info('定时进程开启')
    sched = TornadoScheduler(
        jobstores={'default': SQLAlchemyJobStore(url=db_url)},
        executors={'default': ThreadPoolExecutor(20)},
    )
    sched.add_listener(ev_listener, EVENT_ALL)
    logger.info('调度器已开启, 当前任务数: [%d]', len(sched.get_jobs()))
    sched.start()
    return sched


class LogHandler(tornado.web.RequestHandler):
    """Serve the log-viewing page."""

    def get(self):
        # Static shell; live content arrives via the /tail websocket.
        self.render('log.html')

class LogStreamer(tornado.websocket.WebSocketHandler):
    """Stream new lines of the crawler log to the browser via `tail -f`."""

    def open(self):
        filename = "../haoma.log"
        # "-n 0": start at the current end of file so only new lines stream.
        self.proc = Subprocess(["tail", "-f", filename, "-n", "0"],
                               stdout=Subprocess.STREAM,
                               bufsize=1)
        # If tail exits for any reason, close the websocket too.
        self.proc.set_exit_callback(self._close)
        # Arm the first line read; write_line re-arms itself afterwards.
        # NOTE(review): a str "\n" delimiter implies Python 2 / older Tornado;
        # on Python 3 this would need b"\n" — confirm the target runtime.
        self.proc.stdout.read_until("\n", self.write_line)

    def _close(self, *args, **kwargs):
        # tail exited — tear down the websocket connection.
        self.close()

    def on_close(self, *args, **kwargs):
        # Client disconnected: stop the tail subprocess so it doesn't linger.
        logging.info("trying to kill process")
        self.proc.proc.terminate()
        self.proc.proc.wait()

    def write_line(self, data):
        # Forward one log line to the client, then re-arm for the next line.
        logging.info("Returning to client: %s" % data.strip())
        self.write_message(data.strip() + "<br/>")
        self.proc.stdout.read_until("\n", self.write_line)


class DelQueueHandler(tornado.web.RequestHandler):
    """Clear the pending phone-number queue in Redis."""

    def get(self):
        # Drop the whole list, then bounce the browser back to the index.
        redis_server.delete('haoma')
        notice = '<script>alert("队列已清空"); window.location.href="/"</script>'
        self.write(notice)
     

def main():
    """Parse CLI options, build the Tornado application, and serve forever."""
    tornado.options.parse_command_line()
    static_path = "./static"
    routes = [
        (r'/', MainHandler),
        (r"/Add", AddJobHandler),
        (r"/Del", DelJobHandler),
        (r'/clearqueue', DelQueueHandler),
        (r"/Log", LogHandler),
        (r"/tail", LogStreamer),
        (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': static_path}),
    ]
    app = tornado.web.Application(
        handlers=routes,
        template_path=os.path.join(os.path.dirname(__file__), "templates"))
    server = tornado.httpserver.HTTPServer(app)
    server.listen(options.port, '0.0.0.0')
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    # Logging: one shared 'scheduler' logger mirrored to console and file.
    logger = logging.getLogger('scheduler')
    fmt = logging.Formatter("%(asctime)s %(message)s")
    consolehdl = logging.StreamHandler()
    consolehdl.setFormatter(fmt)
    # Log file is named after this script, e.g. "myscript.log".
    filehdl = logging.FileHandler('%s.log' %
                                  os.path.basename(sys.argv[0]).split('.')[0])
    filehdl.setFormatter(fmt)
    logger.addHandler(filehdl)
    logger.addHandler(consolehdl)
    logger.setLevel(logging.DEBUG)
    
    # Scheduler (module global, used directly by the request handlers).
    scheduler = set_scheduler()
    
    # Redis connection shared by job_func and the handlers.
    SETTINGS = get_project_settings()
    redis_host = SETTINGS['REDIS_HOST']
    redis_port = SETTINGS['REDIS_PORT']
    redis_server = redis.StrictRedis(host=redis_host, port=redis_port)
    baidu_url =  "http://www.baidu.com/s?wd="  # NOTE(review): appears unused in this file
    haosou_url = "https://www.so.com/index.php?a=index&q="  # NOTE(review): appears unused in this file
    # Database credentials read as module globals by job_func at run time.
    #tornado.options.parse_command_line()
    
    host = SETTINGS['DB_HOST']
    user = SETTINGS['DB_USER']
    passwd = SETTINGS['DB_PASSWD']
    port = SETTINGS['DB_PORT']
    db = SETTINGS['DB_DB']

    main()
