#!/usr/bin/env python
# -*- coding:utf-8 -*-
import ast
import random
import sys
import time
from multiprocessing.dummy import Pool as ThreadPool

from common import util
from common.config_parser import ConfigParser
from common.generator import create_crawl_object
from common.mongo import MongDb
from config.conf import mongo_db_source
from logger import Gsxtlogger


class StartTaskCrawler(object):
    """Crawl-task driver for one website.

    Loads seed company records from a MongoDB source table and fans them
    out to a per-site crawl worker through a thread pool, looping forever
    with a randomized pause between scans.
    """

    def __init__(self, config_file='config/cmb_gsxt_search.conf', web_site=None):
        """
        :param config_file: path to the crawler .conf file; must be non-empty.
        :param web_site: config section name identifying the target site.
        :raises StandardError: if web_site/config_file is missing or the
            config section lacks required entries.
        """
        self.worker_list = {}    # web_site -> crawl worker object
        self.config_list = {}    # web_site -> parsed config section
        self.thread_num = 8      # default pool size; config may override
        self.web_site = web_site

        # A target site is mandatory.
        if web_site is None or web_site == '':
            raise StandardError('website error...')

        # A config file is mandatory.
        # BUGFIX: the original message said 'website error...' here too,
        # masking which argument was actually missing.
        if config_file is None or config_file == '':
            raise StandardError('config file error...')

        # Load configuration (may override thread_num / source_table / filter).
        self.source_table = 'cmb_list_all'
        self.source_select_param = {}
        self.load_config(config_file)

        # Per-site logger named after the config file stem plus the site.
        # NOTE(review): assumes config_file looks like 'config/xxx.conf'
        # (a path with at least one '/') — confirm with callers.
        log_name = config_file.split('/')[1].split('.')[0] + '_' + self.web_site
        self.log = Gsxtlogger(util.get_pid_log_name(log_name)).get_logger()

        # Connect to the seed-source MongoDB.
        self.source_db = MongDb(mongo_db_source['host'], mongo_db_source['port'], mongo_db_source['db'],
                                mongo_db_source['username'], mongo_db_source['password'], log=self.log)

        # Build one crawl worker per configured site.
        self.init_worker(self.config_list)

    def init_worker(self, config_list):
        """Instantiate a crawl worker for every configured site section."""
        # .items() instead of .iteritems(): identical behavior on the small
        # config dict, and forward-compatible with Python 3.
        for key, value in config_list.items():
            self.worker_list[key] = create_crawl_object(value, key)
            self.log.info('初始化 {key} 完成..'.format(key=key))

    def load_config(self, config_file):
        """Parse *config_file* and populate thread_num, source_table,
        source_select_param and config_list for self.web_site.

        :raises StandardError: if the site section is missing or no
            source_table is configured.
        """
        conf_parse = ConfigParser(config_file)

        # Load the section for the requested site.
        config_dict = conf_parse.get_session(self.web_site)
        if config_dict is None:
            raise StandardError('站点信息错误...{web_site}'.format(web_site=self.web_site))

        # Optional thread-count override. .get() makes a missing key fall
        # back to the default instead of raising KeyError as before.
        thread_num = config_dict.get('thread_num')
        if thread_num is not None:
            self.thread_num = int(thread_num)

        # The seed table is required.
        source_table = config_dict.get('source_table')
        if source_table is not None:
            self.source_table = source_table
        else:
            raise StandardError('没有指定原始种子表: source_table')

        # Optional Mongo filter expression. literal_eval only accepts
        # Python literals (dicts/lists/strings/numbers), unlike eval which
        # would execute arbitrary code from the config file.
        select_param = config_dict.get('source_select_param')
        if select_param is not None:
            self.source_select_param = ast.literal_eval(select_param)

        # Register the section so init_worker can build its worker.
        self.config_list[self.web_site] = config_dict

    def __get_iterator_company_list(self):
        """Return a lazy cursor over the seed table (low memory footprint)."""
        return self.source_db.traverse(self.source_table, self.source_select_param)

    def __get_all_company_list(self):
        """Materialize the whole seed table into a list and log the timing."""
        company_list = []
        start_time = time.time()
        self.log.info('开始加载数据库种子列表..')
        for item in self.source_db.select(self.source_table, self.source_select_param):
            company_list.append(item)
        self.log.info('加载数据库数据个数为: size = {size}'.format(size=len(company_list)))
        end_time = time.time()

        self.log.info('加载数据起始时间: {st}'.format(st=start_time))
        self.log.info('加载数据结束时间: {et}'.format(et=end_time))
        self.log.info('加载数据消耗时间: {t}s'.format(t=end_time - start_time))

        return company_list

    def __get_source_list(self):
        """Return the seed companies: a list for small tables, a lazy
        iterator for big ones (>100k rows) to bound memory usage.
        """
        total_seed = self.source_db.select_count(self.source_table)
        self.log.info('当前种子表: {table} 数目为 {size}'.format(
            table=self.source_table, size=total_seed))

        if total_seed <= 100000:
            company_list = self.__get_all_company_list()
            self.log.info('加载完成 种子列表...')
        else:
            company_list = self.__get_iterator_company_list()
            self.log.info('迭代遍历 种子列表...')

        return company_list

    def task_run(self):
        """Submit every seed to the thread pool, wait for completion, and
        surface any worker exception into the log.
        """
        company_list = self.__get_source_list()
        result_list = []

        # multiprocessing.dummy => threads, suitable for I/O-bound crawling.
        pool = ThreadPool(processes=self.thread_num)
        self.log.info('当前开启线程数目: thread_num = {num}'.format(num=self.thread_num))
        try:
            for item in company_list:
                result_list.append(pool.apply_async(self.worker_list[self.web_site].query_task,
                                                    args=(item,)))
        except Exception as e:
            self.log.exception(e)

        pool.close()
        pool.join()

        # BUGFIX: apply_async swallows worker exceptions until .get() is
        # called; the original never retrieved the results, so task errors
        # were silently lost. Log each one here.
        for async_result in result_list:
            try:
                async_result.get()
            except Exception as e:
                self.log.exception(e)

        # Release the materialized seed list before the next scan cycle.
        if isinstance(company_list, list):
            del company_list

        self.log.info('完成抓取!!')

    def start_worker(self):
        """Run task_run forever, sleeping 300-400s between scan rounds."""
        while True:
            start_time = time.time()
            self.task_run()
            end_time = time.time()
            self.log.info('扫描起始时间: {st}'.format(st=start_time))
            self.log.info('扫描结束时间: {et}'.format(et=end_time))
            self.log.info('扫描消耗时间: {t}s'.format(t=end_time - start_time))
            self.log.info('完成扫描...')
            time.sleep(random.randint(300, 400))


def main():
    """Entry point: read optional CLI overrides (argv[1] = config path,
    argv[2] = website key) and start the crawl loop.
    """
    argv = sys.argv
    config = argv[1] if len(argv) > 1 else 'config/cmb_gsxt_search.conf'
    website = argv[2] if len(argv) > 2 else 'sichuan'

    crawler = StartTaskCrawler(config_file=config,
                               web_site=website)
    crawler.start_worker()


if __name__ == "__main__":
    main()
