# uncompyle6 version 3.9.0
# Python bytecode version base 2.7 (62211)
# Decompiled from: Python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
# Embedded file name: run.py
# Compiled at: 2019-08-30 19:49:01
"""
文件名：run.py
功能：爬虫多线程执行文件

代码历史：
2014-02-07：贺伟刚，创建代码框架
2014-02-26：庞  威  代码补充
"""
import gevent.pool, gevent.queue, gevent.event
from gevent import monkey
import os, sys, imp, json, time, signal, random, urllib2, inspect, requests, urlparse, traceback
from collections import defaultdict
# Import the project logger.  If that fails (typically because spider.conf is
# corrupt or missing), restore the backup config file and abort so the
# operator can retry.  NOTE(review): the bare ``except`` clauses also swallow
# KeyboardInterrupt/SystemExit -- presumably deliberate best-effort recovery.
try:
    import log
except:
    try:
        re = os.popen('sudo cp /work/spider/spider_old.conf /work/spider/spider.conf')
    except:
        re = os.popen('sudo cp spider_old.conf spider.conf')

    # Drain the pipe so the cp completes before we exit.
    r = re.read()
    print '出错啦，请尝试重新测试...'
    sys.exit()

import util, setting, argsparser, version
__version__ = version.__version__
eventExit = None
cmd_args = None

def handle_spidercode_error(e, func=None):
    """Log an exception raised inside spider code through the spidercode log.

    ``func`` (optional) is the callable whose name is attached to the entry.
    """
    func_name = getattr(func, '__name__', '')
    entry = log.make_spidercode_log(exc_text=traceback.format_exc(),
                                    extra={'func_name': func_name})
    log.logger.error(entry)


def get_config_content(data):
    """Return the spider config source from ``data``, a local cache, or remote.

    Lookup order:
      1. ``data['config_content']`` if the dispatcher embedded it directly
         (the key is popped from ``data``);
      2. the local cache file ``/tmp/<config_id>.data`` when its
         ``last_modify_time`` matches the dispatcher's;
      3. the remote config service (the result is written back to the
         local cache file).

    Returns the UTF-8 encoded config source, or '' on any failure.
    """
    config = data.pop('config_content', '').encode('utf-8')
    if config:
        return config
    config_id = data.get('config_id')
    if not config_id:
        log.logger.error('获取config_id 失败: %s' % data)
        return ''
    last_modify_time = int(data.get('last_modify_time', 0))
    config_data = {}
    local_file = os.path.join('/tmp/', '%s.data' % config_id)
    try:
        with open(local_file) as (f):
            config_data = json.loads(f.read())
    except Exception as e:
        log.logger.exception(e)

    # Refresh from the remote service when there is no usable local cache or
    # the cached copy is stale.
    remote_flag = 0
    if not config_data:
        remote_flag = 1
    else:
        local_last_modify_time = config_data.get('last_modify_time', -1)
        if local_last_modify_time != last_modify_time:
            remote_flag = 1
    # BUG FIX: the refresh and the final return below were previously nested
    # inside the ``else`` branch above, so a missing/corrupt cache file (the
    # ``not config_data`` case) skipped the remote download entirely and the
    # function always returned ''.
    if remote_flag:
        try:
            config_content = requests.get(setting.GET_CONFIG_CONTENT_FROM % config_id).json().get('config_content', '')
            if config_content:
                config_data = {'config_content': config_content, 'last_modify_time': last_modify_time}
                with open(local_file, 'w') as (f):
                    f.write(json.dumps(config_data, ensure_ascii=False).encode('utf8'))
        except Exception as e:
            log.logger.exception(e)

    if config_data:
        return config_data.get('config_content', '').encode('utf8')
    return ''


def load_module(url, spider_id=None, worker_id=None, name='spider', add_to_sys_modules=0):
    """
    动态加载py模块。url为要加载的模块地址，支持http, ftp及本地操作；
    参数name是加载后的模块名称，不能使包的形式， 因此，name='spider.sina'是不允许的；
    参数add_to_sys_modules=1表示将新建模块加入到sys.module; 设置为0则表示不加入；
    """
    newurl_lower = url.lower()
    if newurl_lower.startswith('http://') or newurl_lower.startswith('https://'):
        try:
            url = url % (spider_id, worker_id)
            response = requests.get(url, timeout=15)
        except Exception as e:
            log.logger.error('run.py:load_module(): url:%s, Exception:%s' % (url, e))
            time.sleep(60)
            return (None, None)

        try:
            if response:
                data = response.json()
            else:
                if response.status_code in (500, ):
                    log.logger.error('dispatch service error, waiting !!!')
                    time.sleep(10)
                response.raise_for_status()
        except Exception as e:
            log.logger.error('json.load() failed; excepiton: %s; url:%s' % (e, url))
            return (None, None)

        try:
            if not data:
                time.sleep(60)
                return (None, None)
            config = get_config_content(data)
        except Exception as e:
            log.logger.error("config_content.encode('utf8') failed; excepiton: %s" % e)
            return (None, None)

        if not config:
            time.sleep(60)
            return (None, None)
        try:
            code = compile(config, '', 'exec')
        except Exception as e:
            log.logger.error('-- compile failed --; config_id:%s; excepiton: %s' % (data.get('config_id', '-1'), e))
            return (None, None)

        module = imp.new_module(name)
        try:
            exec code in module.__dict__
        except Exception as e:
            log.logger.exception(e)
            log.logger.error('-- exec code in module.__dict__ failed --; config_id:%s; excepiton: %s' % (
             data.get('config_id', '-1'), e))
            return (None, None)

        if add_to_sys_modules:
            sys.modules[name] = module
        return (data, module)
    else:
        if not newurl_lower.startswith('file:///'):
            url = 'file:///%s' % url
        try:
            data = urllib2.urlopen(url).read()
        except Exception as e:
            log.logger.error('run.py:load_module(): url:%s, Exception:%s' % (url, e))
            return (None, None)

        config = data
        try:
            code = compile(config, '', 'exec')
        except Exception as e:
            log.logger.error('-- compile failed --; config_id:%s; excepiton: %s' % (data.get('config_id', '-1'), e))
            return (None, None)

        module = imp.new_module(name)
        try:
            exec code in module.__dict__
        except Exception as e:
            log.logger.exception(e)
            log.logger.error('-- exec code in module.__dict__ failed --; config_id:%s; excepiton: %s' % (
             data.get('config_id', '-1'), e))
            return (None, None)

        if add_to_sys_modules:
            sys.modules[name] = module
        return ({}, module)
        return


def zhongbao_add(url, data_dict, r_user, r_time):
    """Record a crowdsourced ("zhongbao") crawl task in redis.

    Stores ``data_dict`` (JSON-encoded) under ``url`` in a hash, timestamps
    the url in a companion sorted set, and prunes entries older than
    ``r_time`` seconds from both structures.  ``r_user`` selects the key
    namespace ('wego' vs the agent namespace).
    """
    import redis
    redis_db = redis.StrictRedis.from_url('redis://redis-spider-cooperation-1.istarshine.net.cn/3')
    if r_user == 'wego':
        k = 'zhongbao:hash_zhongbao_urls'
        dedup = 'zhongbao:zset_zhongbao_list_urls'
    else:
        k = 'zhongbao_agent:hash_zhongbao_urls'
        dedup = 'zhongbao_agent:zset_zhongbao_list_urls'
    data_dict = json.dumps(data_dict)
    redis_db.hset(k, url, data_dict)
    redis_db.zadd(dedup, {url: time.time()})
    # Expire old entries from both the zset and the hash in one pipeline.
    pipe = redis_db.pipeline()
    zhongbao_del = redis_db.zrangebyscore(dedup, 0, time.time() - r_time)
    for i in zhongbao_del:
        pipe.zrem(dedup, i)
        pipe.hdel(k, i)

    # Pruning is best-effort, but the original bare ``except`` also swallowed
    # KeyboardInterrupt/SystemExit -- narrowed to Exception.
    try:
        pipe.execute()
    except Exception:
        pass


def get_detail_page_urls(spider, urls, func, detail_job_queue):
    """
    Recursively walk the listing pages in ``urls`` and collect detail-page
    requests into ``detail_job_queue`` (effectively a tree traversal).

    ``func`` is the parse function for the pages ``urls`` point to; it
    returns ``(list_urls, callback, next_page_url)`` where ``list_urls``
    are the urls extracted from the current page, ``callback`` is the parse
    function for those urls, and ``next_page_url`` is the next listing page
    parsed from the current page (None when pagination is not followed).
    ``func is None`` means ``urls`` are detail pages themselves.
    ``detail_job_queue`` is the detail-page queue; detail requests found
    here are put onto it (or routed into redis for "zhongbao" runs).
    """
    global cmd_args
    # The ZHONGBAO cmdline value encodes extra config as 'key:value__key:value'.
    new_spider_id = cmd_args.__dict__.get('ZHONGBAO')
    data_dict = dict()
    config_data = new_spider_id.split('__')
    r_user = 'wego'  # redis key-namespace selector (see zhongbao_add)
    r_time = 259200  # zhongbao entry retention, seconds (3 days)
    r_run = 'all'    # 'all': also feed the local detail queue
    for cd in config_data:
        pk = cd.split(':')[0]
        pv = cd.split(':')[-1]
        if str(pk) == 'redis_time':
            r_time = int(str(pv))
        elif str(pk) == 'user_type':
            r_user = str(pv)
        elif str(pk) == 'run_type':
            r_run = str(pv)
        else:
            # Any other key:value pair is forwarded as task metadata.
            data_dict.update({str(pk): str(pv)})

    if func is not None:
        # Listing-page mode: download each url, parse it, then recurse into
        # the extracted urls and (optionally) the next listing page.
        if urls:
            for request in urls:
                url = request.get('url') if isinstance(request, dict) else request
                log.logger.debug('downloading list page ...%s' % url)
                response = spider.download(request, func_name='get_start_urls')
                try:
                    # Parsers may accept (response, request) or just (response).
                    if 'url' in inspect.getargspec(func).args:
                        list_urls, callback, next_page_url = func(response, request)
                    else:
                        list_urls, callback, next_page_url = func(response)
                except Exception as e:
                    e_detail = traceback.format_exc()
                    if getattr(cmd_args, 'debug', None):
                        log.logger.exception(util.R(e_detail))
                    exc_dic = {'detail': e_detail, 'url': url, 'e_name': util.get_type_str(e)}
                    spider.exceptions_info_list.append(exc_dic)
                    # Treat a failed parse as an empty page and carry on.
                    list_urls, callback, next_page_url = [], None, None
                    log.logger.error('-- %s , config_id: %s, exception: %s' % (func, spider.config_id, e))

                spider.check_url_list(list_urls, url)
                get_detail_page_urls(spider, list_urls, callback, detail_job_queue)
                data_dict.update({'is_detail': False})
                data_dict.update({'detail_urls': list(list_urls)})
                if r_run != 'all':
                    zhongbao_add(url, data_dict, r_user, r_time)
                else:
                    data_dict.update({'run_type': 'all'})
                    zhongbao_add(url, data_dict, r_user, r_time)
                if next_page_url is not None:
                    # Follow pagination with the same parser.
                    get_detail_page_urls(spider, [next_page_url], func, detail_job_queue)

    else:
        # Detail-page mode: ``urls`` are the detail requests themselves.
        if urls is None:
            urls = []
            log.logger.error('-- urls is None, config_id: %s ' % spider.config_id)
        for request in urls:
            url, _request = (request.get('url'), request) if isinstance(request, dict) else (request, '')
            if not isinstance(url, basestring):
                continue
            spider.increase_total_data_num()
            # Skip urls already seen by the dedup store (best-effort).
            if spider.urldedup is not None:
                try:
                    if spider.urldedup.is_dedup(url):
                        continue
                except Exception as e:
                    log.logger.exception(e)

            data_dict.update({'is_detail': True})
            if r_run != 'all':
                # zhongbao-only run: hand the detail url to redis instead of
                # crawling it locally.
                zhongbao_add(url, data_dict, r_user, r_time)
                spider.increase_parsed_success_num()
            else:
                detail_job_queue.put((spider, request))
            spider.increase_new_data_num()
            if getattr(cmd_args, 'debug', None):
                print ' *** new detail url is: %s %s' % (url, _request)

    return


@util.keepalive(handle_func=handle_spidercode_error)
def list_page_thread(eventExit, detail_job_queue, name, crawler_data_queue):
    """
    List-page worker: repeatedly fetches a config script from the dispatch
    server, instantiates its MySpider, and walks the spider's start urls to
    feed ``detail_job_queue``.  Exits when ``eventExit`` is set.

    ``name`` is this worker's numeric id; ``crawler_data_queue`` receives
    crawled data items.
    """
    while 1:
        if eventExit.isSet():
            log.logger.debug('---***--- list threads finished !!!')
            break
        else:
            data, mod = load_module(setting.GET_SPIDER_CONFIG_FROM, setting.SPIDER_ID, name)
            if mod is not None:
                # The dispatcher may ask for this config to be re-run
                # several times in a row.
                try:
                    dispatch_repeat_times = int(data.get('repeat_times', 1))
                except Exception as e:
                    log.logger.exception(e)
                    dispatch_repeat_times = 1

                repeat_times = max(getattr(setting, 'REPEAT_TIMES', 1), dispatch_repeat_times)
                if getattr(cmd_args, 'debug', None):
                    repeat_times = 1
                while repeat_times > 0:
                    if eventExit.isSet():
                        break
                    try:
                        # Build and wire up a fresh spider for this run.
                        spider = mod.MySpider(cmd_args=cmd_args)
                        spider._conf_info = data
                        spider.init_setting()
                        spider.set_data_queue(crawler_data_queue)
                        spider.set_detail_page_queue(detail_job_queue)
                        spider.init_dedup()
                        spider.init_downloader()
                    except Exception as e:
                        log.logger.error('-- init spider failed; config_id: %s , %s' % (data.get('config_id', ''), e))
                        log.logger.exception(e)
                        repeat_times -= 1
                        continue

                    if spider.debug:
                        print 'spider: proxy_enable: %s' % spider.proxy_enable
                        print 'spider: proxy_max_num: %s' % spider.proxy_max_num
                        print 'spider: timeout: %s' % spider.timeout
                        print 'spider: data_db: %s' % spider.data_db
                        print 'spider: log_db: %s' % spider.log_db
                        print 'spider: dedup_uri: %s' % spider.dedup_uri
                        print 'spider: dedup_key: %s' % spider.dedup_key
                    # NOTE(review): post_data and config_id are only bound in
                    # this branch but are used unconditionally below.  As
                    # written, load_module never returns a module without a
                    # dict ``data``, so the branch is always taken -- verify
                    # if load_module's contract ever changes.
                    if data is not None:
                        job_id = str(data.get('job_id', '-1'))
                        config_id = str(data.get('config_id', '')) or '%s' % setting.SPIDER_ID
                        config_name = data.get('savename', '').encode('utf8')
                        spider.set_job_id(job_id)
                        spider.set_config_id(config_id)
                        spider.set_config_name(config_name)
                        spider.set_spider_id(setting.SPIDER_ID)
                        spider.set_worker_id(name)
                        limit = data.get('limit', 1)
                        post_data = {'spider_id': setting.SPIDER_ID, 'worker_id': name, 
                           'config_id': config_id, 
                           'limit': limit}
                    try:
                        start_urls = spider.get_start_urls(post_data)
                    except Exception as e:
                        start_urls = []
                        exc_dic = {'detail': traceback.format_exc(), 'url': '', 'e_name': util.get_type_str(e)}
                        spider.exceptions_info_list.append(exc_dic)
                        log.logger.error('-- get_start_urls failed; config_id: %s , %s' % (config_id, e))

                    get_detail_page_urls(spider, start_urls, spider.parse, detail_job_queue)
                    spider.set()
                    res = spider.spider_finished()
                    # Wait for the job's detail pages to finish (30 min cap).
                    if not spider.job_event.wait(timeout=1800):
                        log.logger.error('config_id: %s, job_event timeout exception!!!' % config_id)
                    if res:
                        del spider
                    repeat_times -= 1

                # Debug runs process a single config then stop everything.
                if getattr(cmd_args, 'debug', None):
                    eventExit.set()
            time.sleep(1.0)

    return


@util.keepalive(handle_func=handle_spidercode_error)
def detail_page_thread(eventExit, job_queue):
    """
    Detail-page download worker.  Pulls (spider, request) pairs from
    ``job_queue`` until the queue stays empty and ``eventExit`` is set.
    Requests carrying a ``delay`` are rescheduled via a ``crawl_time``
    deadline instead of blocking the worker.
    """
    while True:
        try:
            spider, request = job_queue.get(True, 1)
        except Exception:
            if eventExit.isSet():
                break
            continue

        if isinstance(request, dict):
            now = time.time()
            try:
                deadline = request.get('crawl_time', 0)
                if deadline:
                    if now >= deadline:
                        # Due: strip the marker and crawl it this round.
                        request.pop('crawl_time')
                    else:
                        # Not due yet -- push it back and yield briefly.
                        time.sleep(0.1)
                        job_queue.put((spider, request))
                        continue
                else:
                    delay = request.get('delay', 0)
                    if delay:
                        # First sighting of a delayed request: stamp its
                        # deadline and requeue it.
                        time.sleep(0.1)
                        request['crawl_time'] = now + delay
                        job_queue.put((spider, request))
                        continue
            except Exception as e:
                log.logger.exception(e)

        result, next_urls = spider.parse_detail_by_url(request)
        for next_url in (next_urls or []):
            job_queue.put((spider, next_url))

        if result:
            del spider
        job_queue.task_done()


@util.keepalive(handle_func=handle_spidercode_error)
def data_queue_thread(eventExit, crawler_data_queue):
    """
    Data-sink worker: drains crawled items from ``crawler_data_queue``,
    groups them by their ``data_db`` target and ships each group through a
    cached sender created by ``util.from_url``.  Senders are closed once
    ``eventExit`` is set and the queue has drained.
    """
    data_senders = {}
    while 1:
        try:
            data = crawler_data_queue.get(True, 1)
            if not data:
                continue
        except Exception as e:
            if eventExit.isSet():
                for sender in data_senders.values():
                    sender.close()

                break
            continue

        # Normalize a single item to a one-element batch.
        if isinstance(data, dict):
            data = [
             data]
        # BUG FIX: this was ``elif isinstance(data, list)``, so a single dict
        # item -- just wrapped into a list by the branch above -- skipped the
        # whole send block and was silently dropped.
        if isinstance(data, list):
            data_list = defaultdict(list)
            for item in data:
                data_db = item.pop('data_db', None)
                if data_db is not None:
                    data_list[data_db].append(item)

            # (loop variable renamed from ``data`` to avoid shadowing)
            for data_db, batch in data_list.items():
                sender = None
                if data_db in data_senders:
                    sender = data_senders[data_db]
                else:
                    try:
                        if data_db:
                            sender = data_senders.setdefault(data_db, util.from_url(data_db))
                        else:
                            log.logger.error('not data_db: %s' % data_db)
                    except Exception as e:
                        log.logger.error('init sender failed: %s, %s' % (e, data_db))
                        continue

                if sender:
                    try:
                        sender.send(batch)
                    except Exception as e:
                        log.logger.error('send data failed: %s, %s' % (e, data_db))

        crawler_data_queue.task_done()

    return


@util.keepalive(handle_func=handle_spidercode_error)
def kafka_produce_thread(eventExit):
    """
    Kafka log-shipping thread: rotates /work/log/bak.log.1 into a private
    temp file and produces each JSON line, routed by its 'kafka_topic'
    field, to Kafka.  A no-op when confluent_kafka is not installed.
    """
    try:
        from confluent_kafka import Producer
    except ImportError:
        return

    def random_name():
        # Unique temp-file name; ``md5`` is the Python 2 stdlib module.
        import random, md5, time
        f_name = md5.md5(('{}{}').format(time.time(), random.random())).hexdigest()
        return ('temp_{}').format(f_name)

    kafka_server = getattr(setting, 'KAFKA_SERVER', '192.168.16.62:9092')
    # NOTE(review): default_topic is never used below -- routing comes solely
    # from each message's own 'kafka_topic' field.
    default_topic = getattr(setting, 'KAFKA_TOPIC', 'spider')
    kafka_producer = Producer({'bootstrap.servers': kafka_server, 
       'message.send.max.retries': 1, 
       'queue.buffering.max.messages': 100000, 
       'compression.codec': 'gzip'})
    log_name = '/work/log/bak.log.1'
    private_log_name = '/work/log/' + random_name()
    private_log_name_bak = private_log_name + '.bak'
    while 1:
        if eventExit is not None and eventExit.isSet():
            break
        msgs = []
        # Backpressure: flush when librdkafka's internal queue grows too big.
        if len(kafka_producer) > 10000:
            log.logger.debug('kafka_producer flush')
            kafka_producer.flush()
            continue
        # Claim the rotated log by renaming it to our private file.
        if os.path.exists(log_name):
            try:
                os.rename(log_name, private_log_name)
            except Exception as e:
                log.logger.exception(e)

        if os.path.exists(private_log_name):
            with open(private_log_name) as (f):
                msgs = f.readlines()
            os.rename(private_log_name, private_log_name_bak)
        if msgs:
            for msg in msgs:
                topic = ''
                try:
                    dict_msg = json.loads(msg)
                    topic = dict_msg.get('kafka_topic', '')
                except:
                    pass

                # Lines without a kafka_topic field are silently dropped.
                if not topic:
                    continue
                try:
                    kafka_producer.produce(topic, msg)
                except BufferError as e:
                    log.logger.exception(e)
                    # Serve delivery callbacks to free local queue space.
                    kafka_producer.poll(0)
                except Exception as e:
                    log.logger.error(('Kafka Error: {}').format(e))

            log.logger.info(('kafka_length: {} file: {}').format(len(kafka_producer), private_log_name))
        else:
            time.sleep(0.1)

    if os.path.exists(private_log_name_bak):
        os.remove(private_log_name_bak)
    return


def run_spider():
    """
    Spider multi-threaded main body.

    Monkey-patches with gevent, installs the SIGTERM handler, spawns the
    list-page / detail-page / data-queue greenlet pools, then blocks on
    ``eventExit`` and joins all pools bounded by ``setting.EXIT_TIMEOUT``.
    """
    global eventExit
    monkey.patch_all()
    signal.signal(signal.SIGTERM, stop_spider)
    # NOTE(review): computed but never used in this function -- the kafka
    # thread does not appear to be spawned here.
    kafka_produce_thread_num = getattr(setting, 'KAFKA_HANDLE_THREAD_NUM', 2)
    counter = 0
    eventExit = gevent.event.Event()
    detail_job_queue = gevent.queue.JoinableQueue()
    crawler_data_queue = gevent.queue.JoinableQueue()
    list_thread_pool = gevent.pool.Pool(setting.LIST_PAGE_THREAD_NUM)
    detail_page_thread_pool = gevent.pool.Pool(setting.DETAIL_PAGE_THREAD_NUM)
    # NOTE(review): pool size is 1 even though DATA_QUEUE_THREAD_NUM
    # greenlets are spawned into it below; extra spawns will wait for a
    # free slot -- confirm this is intended.
    data_queue_thread_pool = gevent.pool.Pool(1)
    for _ in xrange(setting.LIST_PAGE_THREAD_NUM):
        # Stagger worker start-up; ``counter`` doubles as the worker id.
        time.sleep(random.random())
        counter += 1
        list_thread_pool.spawn(list_page_thread, eventExit, detail_job_queue, counter, crawler_data_queue)

    # Give the list workers a head start before detail workers begin draining.
    interval = getattr(setting, 'LIST_DETAIL_INTERVAL', 60)
    time.sleep(interval)
    for _ in range(setting.DETAIL_PAGE_THREAD_NUM):
        detail_page_thread_pool.spawn(detail_page_thread, eventExit, detail_job_queue)

    for _ in range(setting.DATA_QUEUE_THREAD_NUM):
        data_queue_thread_pool.spawn(data_queue_thread, eventExit, crawler_data_queue)

    eventExit.wait()
    try:
        try:
            # Bound the shutdown: stop joining pools after EXIT_TIMEOUT.
            timeouter = gevent.Timeout(setting.EXIT_TIMEOUT)
            timeouter.start()
            list_thread_pool.join()
            detail_page_thread_pool.join()
            data_queue_thread_pool.join()
        except gevent.Timeout as e:
            log.logger.debug('internal timeout triggered: %s' % e)

    finally:
        timeouter.cancel()

    log.logger.info('---***--- all work finished!!!')


def stop_spider(signum, frame):
    """
    SIGTERM handler: stop all spider threads.  The config-fetching threads
    stop immediately; detail-page workers exit only after draining
    detail_job_queue.
    """
    message = ' ---***--- stop_spider() called !!! '
    eventExit.set()
    log.logger.debug(message)


def web_interface():
    """
    Serve a minimal bottle application exposing a ``/status`` health
    endpoint; unknown paths answer with a plain '404' body.
    """
    import bottle
    from bottle import route, run, error

    @error(404)
    def handle_not_found(error):
        return '404'

    @route('/status')
    def report_status():
        return '1'

    run(host='')


def multi_process_runner():
    """
    Multi-process mode: run ``run_spider`` in PROCESS_NUM child processes
    and supervise them -- heartbeat once a minute and restart the children
    when ``need_restart`` says so.

    Returns the last restart flag; 2 means the spider version changed and
    the caller should restart the whole program.
    """
    import multiprocessing
    process_pool = []
    last_restart_time = 0
    eventExit = gevent.event.Event()

    def stop_process(signum, frame):
        # SIGTERM: stop supervising and terminate all children.
        eventExit.set()
        for p in process_pool:
            p.terminate()

    signal.signal(signal.SIGTERM, stop_process)
    if not before_runing(eventExit):
        return
    for _ in xrange(getattr(setting, 'PROCESS_NUM', 2)):
        p = multiprocessing.Process(target=run_spider)
        p.start()
        process_pool.append(p)

    time.sleep(5)
    restart_flag = 0
    while not eventExit.is_set():
        restart_flag = need_restart(last_restart_time)
        heartbeat()
        if restart_flag:
            for p in process_pool:
                p.terminate()

            try:
                try:
                    # Wait, bounded by EXIT_TIMEOUT, until no child remains
                    # alive; the for/else breaks the while once every p has
                    # exited.
                    exit_timeout = getattr(setting, 'EXIT_TIMEOUT', 300)
                    timeouter = gevent.Timeout(exit_timeout)
                    timeouter.start()
                    while 1:
                        for p in process_pool:
                            if p.is_alive():
                                break
                        else:
                            break

                        time.sleep(1)

                except gevent.Timeout as e:
                    log.logger.debug('internal timeout triggered: %s' % e)

            finally:
                timeouter.cancel()

            # Hard-kill anything that survived the grace period.
            for p in process_pool:
                if p.is_alive():
                    os.kill(p.pid, 9)

            if restart_flag == 2:
                # Version changed on disk: bail out so the caller restarts us.
                log.logger.info('spider version update, process restarting')
                break
            # Plain restart: spawn a fresh set of children.
            process_pool = []
            eventExit = gevent.event.Event()
            for _ in xrange(setting.PROCESS_NUM):
                p = multiprocessing.Process(target=run_spider)
                p.start()
                process_pool.append(p)

            last_restart_time = time.time()
            log.logger.debug('restart process successed')
        time.sleep(60)

    for p in process_pool:
        p.join()

    log.logger.debug('---***--- All work finished!!!')
    return restart_flag


class App(object):
    """
    Execution object handed to python-daemon's ``runner.DaemonRunner``;
    the attribute names set in __init__ follow the interface that runner
    requires (stdio paths, pidfile path/timeout).
    """

    def __init__(self):
        self.stdin_path = setting.STDIN_PATH
        self.stdout_path = setting.STDOUT_PATH
        self.stderr_path = setting.STDERR_PATH
        self.pidfile_path = setting.PIDFILE_PATH
        self.pidfile_timeout = setting.PIDFILE_TIMEOUT

    def run(self):
        """
        Daemon body: run the multi-process spider.  A flag of 2 means the
        spider version changed, so re-spawn ourselves with --start and
        detached stdio before this process exits.
        """
        flag = multi_process_runner()
        if flag == 2:
            sys.argv[1] = '--start'
            args = tuple([sys.executable] + sys.argv)
            setting.config.set('daemon_app', 'stdin_path', '/dev/null')
            setting.config.set('daemon_app', 'stdout_path', '/dev/null')
            setting.config.set('daemon_app', 'stderr_path', '/dev/null')
            with open(setting.conf_file, 'w') as (f):
                setting.config.write(f)
            os.spawnve(os.P_NOWAIT, sys.executable, args, os.environ)

    def stop(self):
        """
        Invoked just before the daemon stops: signal all workers to exit.
        """
        # BUG FIX: stop_spider takes (signum, frame); calling it with no
        # arguments raised TypeError and the workers were never signalled.
        stop_spider(None, None)


def reset_setting_config(args):
    """Override matching ``setting`` module attributes with command-line
    argument values, and force a single list-page thread when debugging or
    when the config source is not a remote http(s) service.
    """
    if args:
        config_url = getattr(args, 'GET_SPIDER_CONFIG_FROM', setting.GET_SPIDER_CONFIG_FROM)
        if not config_url:
            config_url = setting.GET_SPIDER_CONFIG_FROM
        if config_url:
            config_url = config_url.lower()
            # BUG FIX: this previously read
            # ``not startswith('http') or not startswith('https')``, which is
            # true for every plain http:// url, so remote non-https runs were
            # always forced down to one thread.  'https://...' also begins
            # with 'http', so a single prefix test covers both schemes.
            if cmd_args.debug or not config_url.startswith('http'):
                args.LIST_PAGE_THREAD_NUM = 1
        for key, value in args.__dict__.iteritems():
            if value is not None:
                if key in setting.__dict__:
                    if args.debug:
                        log.logger.info('before: %s: %s' % (key, setting.__dict__[key]))
                    setting.__dict__[key] = value
                    if args.debug:
                        log.logger.info('after: %s: %s' % (key, value))

    return


def before_runing(eventExit):
    """Ensure a spider id (and ingest dedup configuration) exists; apply for
    one when missing and persist it into spider.conf.

    Returns 1 on success, 0 on unrecoverable configuration errors.
    """
    # NOTE(review): the parse result is never used below -- confirm before
    # removing.
    p = urlparse.urlparse(setting.GET_SPIDER_CONFIG_FROM)
    new_spider_id = ''
    if True:
        log.logger.info('get new spider_id start')
        # Sanity-check the environment; spider_name itself is not used later.
        spider_name = os.getenv('spider_name') or os.getenv('HOSTNAME')
        if not spider_name:
            log.logger.error('enviroment variable HOSTNAME not found or not value')
            return 0
        if not setting.ADD_SPIDER_FROM:
            log.logger.error('not found ADD_SPIDER_FROM')
            return 0
        while 1:
            # The spider id is carried in the ZHONGBAO command-line value.
            new_spider_id = cmd_args.__dict__.get('ZHONGBAO')
            if not new_spider_id:
                log.logger.error('add spider failed sleep 1 minutes and retrying')
                time.sleep(60)
                continue
            else:
                # Persist the id into spider.conf and reload settings so the
                # rest of the program sees it.
                setting.config.set('spider', 'spider_id', new_spider_id)
                with open(setting.conf_file, 'w') as (f):
                    setting.config.write(f)
                try:
                    reload(setting)
                except:
                    # Reload failed: restore the backup config and abort.
                    try:
                        re = os.popen('sudo cp /work/spider/spider_old.conf /work/spider/spider.conf')
                    except:
                        re = os.popen('sudo cp spider_old.conf spider.conf')

                    r = re.read()
                    print '出错啦，请尝试重新测试...'
                    sys.exit()

                log.logger.info('get new spider_id success')
                break

    heartbeat()
    return 1


def need_restart(last_restart_time):
    """Decide whether the spider child processes should be restarted.

    Returns:
        0 -- no restart (also throttles checks to at most one per minute);
        2 -- version.py changed: the caller must restart the whole program;
        1 -- forced restart flag, scheduled daily restart time, or low
             free memory.
    """
    if time.time() - last_restart_time < 60:
        return 0
    reload(version)
    if version.__version__ != __version__:
        return 2
    if version.force_restart == 1:
        log.logger.info('force restart child process')
        try:
            # Reset the on-disk flag so the forced restart fires only once.
            os.system("sed -i 's/force_restart = 1/force_restart = 0/' /work/spider/version.py")
        except Exception as e:
            log.logger.info('modify version.force_restart failed !!!')
            log.logger.exception(e)

        return 1
    # Scheduled daily restart at RESTART_TIME ("HH:MM").
    restart_hour, restart_minute = setting.RESTART_TIME.split(':')
    if int(time.strftime('%H')) == int(restart_hour) and int(time.strftime('%M')) == int(restart_minute):
        log.logger.debug("it's time to restart process")
        return 1
    # awk prints "$1 $2" concatenated, e.g. "MemTotal:16384" (the "kB" unit
    # in $3 is dropped), so each value parses cleanly as an int of kB.
    with os.popen("cat /proc/meminfo|awk '{print $1 $2}'") as (f):
        info = f.readlines()
    try:
        info = dict([ x.strip().split(':') for x in info ])
        MemTotal = int(info.get('MemTotal'))
        MemFree = int(info.get('MemFree'))
        Cached = int(info.get('Cached'))
        # NOTE(review): the log message says 80%, but this fires when less
        # than 10% of memory is free+cached (i.e. >90% used) -- confirm
        # which threshold is intended.
        if (MemFree + Cached) * 1.0 / MemTotal < 0.1:
            log.logger.debug('memory used over limit：80%  starting restart process')
            return 1
    except Exception as e:
        log.logger.exception(e)

    return 0


def heartbeat():
    """Ping the heartbeat endpoint for this spider id; failures are only
    logged.  Returns 0 when no heartbeat url is configured, 1 otherwise."""
    if not setting.SPIDER_HEARTBEAT_FROM:
        log.logger.debug('not heartbeat url')
        return 0
    url = setting.SPIDER_HEARTBEAT_FROM % setting.SPIDER_ID
    try:
        requests.get(url, timeout=5)
    except Exception as e:
        log.logger.error('heartbeat failed: %s' % e)

    return 1


def main():
    """
    Entry point: dispatch on the parsed command line -- daemon start/stop,
    a foreground debug run, or usage help.
    """
    global cmd_args
    cmd_args = argsparser.cmd_parse()
    if cmd_args.start:
        _run_daemon_action('start')
    elif cmd_args.stop:
        _run_daemon_action('stop')
    elif cmd_args.debug:
        eventExit = gevent.event.Event()
        if not before_runing(eventExit):
            return
        reset_setting_config(cmd_args)
        run_spider()
    else:
        argsparser.print_usage()


def _run_daemon_action(action):
    """Run the python-daemon runner with ``action`` ('start' or 'stop').

    The runner reads its action from sys.argv[1], so it is rewritten before
    ``do_action`` is invoked.  (Extracted from two previously duplicated
    branches of main().)
    """
    from daemon import runner
    sys.argv[1] = action
    app = App()
    daemon_runner = runner.DaemonRunner(app)
    daemon_runner.daemon_context.files_preserve = log.daemon_files_preserve
    daemon_runner.do_action()


# Script entry point.
if __name__ == '__main__':
    main()