#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: main.py
@time: 2017/12/20 14:59
"""
# gevent monkey-patching MUST run before any module that touches sockets,
# threads or time is imported — the crawler import below pulls in network
# code, so patching after it (as the original order did) leaves that module
# bound to the un-patched stdlib.
from gevent import monkey

monkey.patch_all()

import signal
import time

import click
import gevent.pool

from config.app_conf import CRAWL_TABLE_CONF
from crawler.zhejiang.search_page_crawler import SearchPageCrawler
from logger import Logger

# module-wide logger for this entry script
global_log = Logger('old_zhejiang_main.log').get_logger()

# global run flag; cleared by the signal handler to request a graceful shutdown
is_running = True


# 休眠
def check_sleep(times):
    """Sleep for up to *times* seconds, one second at a time.

    Sleeping in 1-second slices and re-checking the global run flag keeps
    shutdown responsive instead of blocking for the whole duration.
    """
    for _ in range(times):
        if not is_running:
            break
        time.sleep(1)


# 获得模块日志名称
def get_log_name(province):
    """Return the per-module log file name for the given province/table."""
    return 'crawl_%s.log' % province


# list-page crawl entry point
def crawl_runner(seed_table, param_table, search_page_table, thread_num, store_tube):
    """Run the list-page crawl loop until the global run flag is cleared.

    :param seed_table: seed (keyword) collection name
    :param param_table: search-parameter collection name
    :param search_page_table: collection the fetched list pages go into
    :param thread_num: size of the gevent coroutine pool
    :param store_tube: name of the message-queue tube used for storage
    """
    global_log.info("列表页抓取启动: seed_table = {}".format(seed_table))

    # NOTE(review): the literal key 'seed_table' is used here, not the
    # seed_table argument — confirm CRAWL_TABLE_CONF is keyed by the section
    # name 'seed_table' rather than by the actual table name.
    crawl_flag = CRAWL_TABLE_CONF['seed_table']['crawl_flag']
    search_flag = CRAWL_TABLE_CONF['seed_table']['search_flag']
    list_select_param = CRAWL_TABLE_CONF['seed_table']['list_select_param']

    # dedicated log file for this module
    log_name = get_log_name(seed_table)
    crawler = SearchPageCrawler(store_tube,
                                seed_table,
                                param_table,
                                search_page_table,
                                search_flag,
                                Logger(log_name).get_logger())

    # make sure the flag indexes exist before crawling starts
    crawler.create_source_index(seed_table, crawl_flag)
    global_log.info("索引检测完成: table_name = {} crawl_flag = {}".format(
        seed_table, crawl_flag))

    crawler.create_source_index(seed_table, search_flag)
    global_log.info("索引检测完成: table_name = {} search_flag = {}".format(
        seed_table, search_flag))

    # initialise the coroutine pool
    pool = gevent.pool.Pool(thread_num)
    global_log.info('启用协程...')
    global_log.info('当前开启协程数目: thread_num = {}'.format(thread_num))

    times = 0
    while is_running:
        times += 1
        result_list = []
        __iter = crawler.get_iterator_list(seed_table, list_select_param, ['_id'])
        for item in __iter:
            company = item.get('_id')

            # schedule one crawl task per seed company
            result_list.append(pool.apply_async(crawler.crawl_process, args=(company,)))
            # drain in batches of 10000 so pending AsyncResults don't pile up;
            # the shutdown flag is re-checked between .get() calls
            if len(result_list) >= 10000:
                for result in result_list:
                    if not is_running:
                        break

                    result.get()
                del result_list[:]
            if not is_running:
                break

        # wait on whatever is left from the last partial batch
        if len(result_list) > 0:
            for result in result_list:
                result.get()

        # wait for every coroutine in the pool to finish
        pool.join()

        if not is_running:
            global_log.info("收到退出命令, 退出列表页抓取...")
            break

        global_log.info("种子信息遍历完成: times = {} 休眠5s".format(times))
        check_sleep(5)


def process_quit(signo, frame):
    """Signal handler: clear the global run flag to request a graceful exit.

    :param signo: signal number delivered by the OS
    :param frame: current stack frame (unused, required by the handler API)
    """
    global is_running
    is_running = False
    global_log.info("收到退出进程信号...")


@click.command()
@click.option('--seed_table',
              default='zhejiang_keyword',
              help='旧版浙江工商种子表')
@click.option('--param_table',
              default='new_zhejiang_search_list',
              help='旧版浙江工商参数表')
@click.option('--search_page_table',
              default='old_zhejiang_search_page',
              help='旧版浙江工商列表页表')
@click.option('--thread_num',
              default=1,
              type=int,
              # original help text was a copy-paste error describing a
              # province range; this option sets the coroutine pool size
              help='并发抓取协程数目')
@click.option('--store_tube',
              default='gsxt_store',
              help='存储消息队列')
def main(seed_table, param_table, search_page_table, thread_num, store_tube):
    """Command-line entry point: install signal handlers, then run the crawl."""
    # register graceful-shutdown handlers so the crawl loops can wind down
    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT, signal.SIGUSR1):
        signal.signal(sig, process_quit)

    try:
        # run the list-page crawl until completion or shutdown
        crawl_runner(seed_table, param_table, search_page_table, thread_num, store_tube)
    except Exception as e:
        global_log.error("程序退出异常: ")
        global_log.exception(e)


if __name__ == '__main__':
    main()
