#!/usr/bin/env python 
# coding:utf-8
# @Time :10/12/18 17:50

import sys

sys.path.append('../')
sys.path.append('../../')

from multiprocessing.dummy import Pool
from crawler.wenshu_spider import WenshuSpider
from config.beanstalk_conf import BEANSTALK_CONF
from common.pybeanstalk import PyBeanstalk
from config.tube_conf import TUBE_INFO
from scheduler.mq_handler import MqHandler

from config.mongo_conf import CLUE_CRAWL
from config.mongo_conf import MONGO_DB_SOURCE

from common.mongo import MongDb
from common.logger import AppLogger


class CrawlWorker(object):
    """Schedules wenshu (court judgement document) crawls.

    Loads company names from a MongoDB source collection and, for each one,
    runs a WenshuSpider in a thread pool; crawled cases are recorded through
    the crawled-case Mongo table and the beanstalk queue handler.
    """

    # Number of worker threads in the crawl pool.
    POOL_SIZE = 12
    # Block and drain all outstanding jobs once more than this many pend.
    MAX_PENDING_JOBS = 48

    def __init__(self, log=None):
        """Wire up the queue handler, the two Mongo connections and the pool.

        :param log: optional logger; when omitted a standard-library logger
                    is substituted so logging calls never crash on None.
        """
        # Fall back to a stdlib logger so self.log is always usable
        # (previously start() raised AttributeError when log was None).
        import logging
        self.log = log if log is not None else logging.getLogger(__name__)

        self.beanstalk = PyBeanstalk(BEANSTALK_CONF['host'], BEANSTALK_CONF['port'])
        self.beanstalk_handler = MqHandler(BEANSTALK_CONF, TUBE_INFO["offline_tube"], log=self.log)

        # Destination table: crawled judgement cases.
        self.wenshu_crawled_case_table = MongDb(CLUE_CRAWL['host'], CLUE_CRAWL['port'], CLUE_CRAWL['db'],
                                                CLUE_CRAWL['username'], CLUE_CRAWL['password'], log=self.log)

        # Source table: company names still waiting to be crawled.
        self.wenshu_company_name_table = MongDb(MONGO_DB_SOURCE['host'], MONGO_DB_SOURCE['port'], MONGO_DB_SOURCE['db'],
                                                MONGO_DB_SOURCE['username'], MONGO_DB_SOURCE['password'], log=self.log)

        self.wenshu_company_name_collection = "business_judgement_wenshu_company_list"

        # multiprocessing.dummy.Pool is a *thread* pool -- appropriate here
        # since the spiders are network/IO bound.
        self.pool = Pool(self.POOL_SIZE)

    def __load_company_name(self):
        """Return a batch-traversal cursor over companies not yet crawled.

        Filters the source collection to documents whose search_status is
        unset and whose crawl_status is not 1.
        """
        return self.wenshu_company_name_table.traverse_batch(
            self.wenshu_company_name_collection,
            where={"$and": [{"search_status": None},
                            {"crawl_status": {"$ne": 1}}]})

    def start(self):
        """Crawl every pending company, draining the pool in batches.

        Jobs are submitted asynchronously; once more than MAX_PENDING_JOBS
        are outstanding we block on all of them so that spider exceptions
        surface via AsyncResult.get() before submitting more.
        """
        pending = []
        for company_doc in self.__load_company_name():
            # The document _id holds the company name to search for.
            company_name = str(company_doc.get("_id"))
            wenshu_spider = WenshuSpider(self.wenshu_crawled_case_table, self.beanstalk_handler,
                                         wenshu_company_name_table=self.wenshu_company_name_table,
                                         wenshu_company_collection=self.wenshu_company_name_collection)
            # args look like (company_name, page_size, sort_field, sort_order)
            # -- presumably; confirm against WenshuSpider.start's signature.
            pending.append(self.pool.apply_async(
                wenshu_spider.start, args=(company_name, 20, "法院层级", "dsc")))

            if len(pending) > self.MAX_PENDING_JOBS:
                for job in pending:
                    job.get()
                del pending[:]

        # Drain the final partial batch too, so its exceptions surface
        # (previously they were silently dropped by close()/join()).
        for job in pending:
            job.get()

        self.pool.close()
        self.pool.join()

        self.log.info("all job is done!")


if __name__ == '__main__':
    # Entry point: build a worker wired to a dedicated log file and run it.
    app_log = AppLogger("CrawlWorkerFromMongo.log").get_logger()
    CrawlWorker(log=app_log).start()
