#!/usr/bin/env python 
# coding:utf-8
# @Time :10/10/18 10:44


import sys

sys.path.append('../')
sys.path.append('../../')

from multiprocessing.dummy import Pool
from crawler.wenshu_spider import WenshuSpider
from config.beanstalk_conf import BEANSTALK_CONF
from common.pybeanstalk import PyBeanstalk
from config.tube_conf import TUBE_INFO
from scheduler.mq_handler import MqHandler

from config.mongo_conf import CLUE_CRAWL
from config.mongo_conf import MONGO_DB_SOURCE

from common.mongo import MongDb
from common.logger import AppLogger

from common.log_for_pgsql import LogForPgSql
from common.pid_scanner import PidScanner

# from threading import Lock


class CrawlWorker(object):
    """Multi-threaded wenshu (court judgment document) crawl worker.

    Streams company names out of a source MongoDB collection and dispatches
    one WenshuSpider crawl per name onto a thread pool; crawled cases are
    recorded in Mongo and results flow out through a beanstalk tube.
    """

    # Max attempts when connecting to the PgSQL crawl-log reporter.
    __MAX_CONNECT_LOG_REPORT = 3

    def __new__(cls, *args, **kwargs):
        """Start the PID scanner before instantiation to prevent duplicate launches."""
        # Bug fix: previously referenced the bare global ``log``, which only
        # exists when the module is run as a script; importing this module and
        # constructing CrawlWorker raised NameError. Take it from kwargs.
        cls.pid_scanner = PidScanner(kwargs.get('log'))
        return object.__new__(cls)

    def __init__(self, log=None):
        """Wire up beanstalk, both Mongo handles and the worker thread pool.

        :param log: logger instance shared with all collaborators.
        """
        self.log = log

        self.beanstalk = PyBeanstalk(BEANSTALK_CONF['host'], BEANSTALK_CONF['port'])
        self.beanstalk_handler = MqHandler(BEANSTALK_CONF, TUBE_INFO["offline_tube"], log=self.log)

        # Table recording cases that have already been crawled.
        self.wenshu_crawled_case_table = MongDb(CLUE_CRAWL['host'], CLUE_CRAWL['port'], CLUE_CRAWL['db'],
                                                CLUE_CRAWL['username'], CLUE_CRAWL['password'], log=self.log)

        # Source collection holding the company names still to be crawled.
        self.wenshu_company_name = MongDb(MONGO_DB_SOURCE['host'], MONGO_DB_SOURCE['port'], MONGO_DB_SOURCE['db'],
                                          MONGO_DB_SOURCE['username'], MONGO_DB_SOURCE['password'], log=self.log)

        # multiprocessing.dummy.Pool is a thread pool, suitable for I/O-bound crawls.
        self.pool = Pool(12)

    def __load_company_name(self):
        """Yield company names (Mongo ``_id`` as str) not yet marked as crawled."""
        for per_cursor in self.wenshu_company_name.traverse_batch(
                "business_changsha_500W_company_list", {"wenshu_crawl_status": None}):
            yield str(per_cursor.get("_id"))

    def start(self):
        """Dispatch one crawl job per company name, draining results every 50 jobs."""
        self.log.info("开启启动多线程采集程序...")
        pending = []
        try:
            for company_name in self.__load_company_name():
                wenshu_spider = WenshuSpider(self.wenshu_crawled_case_table, self.beanstalk_handler)
                job = self.pool.apply_async(wenshu_spider.start,
                                            args=(company_name, 20, "法院层级", "dsc"))
                pending.append(job)

                # Drain in batches so the pending list doesn't grow without bound.
                if len(pending) > 50:
                    for per_job in pending:
                        per_job.get()
                    del pending[:]

            # Bug fix: also collect the final partial batch so exceptions from
            # the tail jobs are surfaced instead of silently dropped.
            for per_job in pending:
                per_job.get()
        finally:
            # Bug fix: always shut the pool down, even if a job.get() raised.
            self.pool.close()
            self.pool.join()

    def __proxy_log_report(self):
        """Connect to the PgSQL crawl-log table, retrying up to the class limit.

        :return: a LogForPgSql handle, or None when every attempt failed.
        """
        for _ in range(self.__MAX_CONNECT_LOG_REPORT):
            try:
                self.__log_report = LogForPgSql("crawler_log_table")
                return self.__log_report
            except Exception as e:
                # Narrowed from BaseException: that would also swallow
                # KeyboardInterrupt / SystemExit, making the worker unkillable here.
                self.log.warning("__proxy_log_report 连接 LogForPgSql 发生错误...")
                self.log.exception(e)
        self.log.error("__proxy_log_report 重试连接 LogForPgSql 最大次数({})，但依然无法获取数组...".format(self.__MAX_CONNECT_LOG_REPORT))
        return None


if __name__ == '__main__':
    # NOTE: the name ``log`` must stay global — CrawlWorker.__new__ reads it.
    log = AppLogger("CrawlWorker.log").get_logger()
    worker = CrawlWorker(log=log)
    worker.start()
