#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
 @Time    : 2018/4/15 16:48
 @File    : custom_crawlerprocess.py
 @desc    :
'''

import six
from scrapy.crawler import CrawlerProcess
from scrapy.settings import BaseSettings
from scrapy.utils.project import get_project_settings
from twisted.internet import defer

from commonSpiders.creeper.crawlermanagers.crawler_config import CrawlerGroupConfig
from commonSpiders.creeper.crawlers.crawlers import CustomeCrawler
from commonSpiders.creeper.utils.utils import guid_generate


class CustomCrawlerProcess(CrawlerProcess):
    '''
    Crawler scheduler process.

    Extends Scrapy's ``CrawlerProcess`` with:
      * a registry of available spider classes keyed by spider name,
      * grouped crawler management (add / stop a whole group at once),
      * per-group settings overlaid on a snapshot of the original settings.
    '''

    def __init__(self, settings=None, install_root_handler=True, spiders_list=None, guid=None):
        '''
        :param settings: Scrapy settings, passed through to ``CrawlerProcess``.
        :param install_root_handler: passed through to ``CrawlerProcess``.
        :param spiders_list: iterable of spider classes to register, keyed by
            their ``name`` attribute. Defaults to no spiders.
            (Fixed: the original used the mutable default ``spiders_list=[]``,
            which is shared across all calls.)
        :param guid: external identifier for this process, stored as-is.
        '''
        super(CustomCrawlerProcess, self).__init__(settings, install_root_handler)

        # Keep a plain-dict snapshot of the original settings so each call to
        # add_crawlers() can rebuild from a clean baseline before overlaying
        # per-group overrides.
        self.source_settings = {}
        if isinstance(self.settings, BaseSettings):
            settings = self.settings.copy_to_dict()
        else:
            settings = get_project_settings().copy_to_dict()
        self.source_settings.update(settings)

        self.guid = guid

        # Registry of all currently available spider classes, keyed by name.
        # set() deduplicates; on name collision the last spider wins.
        spiders_list = list(set(spiders_list)) if spiders_list else []
        self.spiders_dict = {spider.name: spider for spider in spiders_list}

        # Crawlers belonging to each group:
        # {group_key: {'crawlers': [Crawler, ...], 'settings': {...}}}
        self.group_crawlers = {}

    def _create_crawler(self, spidercls):
        '''
        Create a crawler object (overrides the parent implementation to
        return a ``CustomeCrawler`` instead of a stock ``Crawler``).

        :param spidercls: spider class, or its name as a string (resolved
            through the spider loader).
        :return: a new ``CustomeCrawler`` bound to the current settings.
        '''
        if isinstance(spidercls, six.string_types):
            spidercls = self.spider_loader.load(spidercls)
        return CustomeCrawler(spidercls, self.settings)

    def start(self, stop_after_crawl=True):
        '''
        Start the crawler process (delegates to ``CrawlerProcess.start``,
        which runs the Twisted reactor).

        :param stop_after_crawl: stop the reactor once all crawlers finish.
        '''
        super(CustomCrawlerProcess, self).start(stop_after_crawl)

    def stop(self):
        '''
        Stop the crawler process (delegates to ``CrawlerProcess.stop``).
        '''
        super(CustomCrawlerProcess, self).stop()

    def config_spider_class(self, key, spider):
        '''
        Register (or replace) a spider class in the registry.

        :param key: registry key (normally the spider name).
        :param spider: spider class to store under ``key``.
        '''
        self.spiders_dict.update({
            key: spider
        })

    def add_crawlers(self, spider_class_obj, count, settings_config=None):
        '''
        Schedule ``count`` crawler instances for the given spider class.

        :param spider_class_obj: spider class to crawl; its ``index``
            attribute is set to the instance number before each crawl.
        :param count: number of crawler instances; falsy values mean 1.
        :param settings_config: dict of settings overrides layered on top of
            the original settings snapshot. Defaults to no overrides.
            (Fixed: the original used the mutable default
            ``settings_config={}``.)
        '''
        if settings_config is None:
            settings_config = {}
        if not isinstance(settings_config, dict):
            print('配置不能为空')
            return

        if not count:
            count = 1

        # Rebuild settings from the pristine snapshot, then overlay overrides.
        # NOTE(review): this rebinds self.settings from a BaseSettings to a
        # plain dict; CustomeCrawler apparently accepts that -- confirm.
        self.settings = self.source_settings.copy()
        self.settings.update(settings_config)
        for i in range(count):
            print('创建爬虫:%s' % i)
            spider_class_obj.index = i
            self.crawl(spider_class_obj)

    def add_crawlers_by_spider_key(self, group_config):
        '''
        Add a group of crawlers described by a ``CrawlerGroupConfig``.

        Looks up the spider class by ``group_config.spider_key``, configures
        it (fresh GUID name, data sources, item parsers, start URL), starts
        ``group_config.count`` crawlers and records them under
        ``group_config.group_key`` in ``self.group_crawlers``.

        :param group_config: a ``CrawlerGroupConfig`` instance.
        :return: ``False`` when ``group_config`` has the wrong type;
            otherwise ``None``.
        '''
        if not isinstance(group_config, CrawlerGroupConfig):
            return False
        spider = self.spiders_dict.get(group_config.spider_key, None)
        if spider:
            # Give this run a unique spider name so Scrapy treats each group
            # as a distinct spider. NOTE: this mutates the registered class.
            guid = guid_generate()
            spider.name = guid
            spider.data_source = group_config.item_data_source_list
            spider.set_item_parse(group_config.item_parse_list)
            spider.start_url = group_config.start_url

            settings = group_config.settings
            self.group_crawlers.setdefault(group_config.group_key, {
                'crawlers': [],
                'settings': settings
            })

            # Detect which crawlers this call added by diffing the length of
            # self.crawlers before and after.
            # NOTE(review): self.crawlers is a set in CrawlerProcess, so the
            # slice below assumes list() keeps the new entries at the end --
            # not guaranteed by set iteration order; verify.
            len_before = len(list(self.crawlers))
            self.add_crawlers(spider, group_config.count, settings)

            crawlers_list = list(self.crawlers)
            len_after = len(crawlers_list)
            if len_after > len_before:
                cur_crawlers = crawlers_list[len_before: len_after]
            else:
                cur_crawlers = []
            if cur_crawlers:
                self.group_crawlers[group_config.group_key]['crawlers'] += cur_crawlers

    def stop_crawlers(self, group_key):
        '''
        Stop every crawler belonging to the given group.

        :param group_key: key of the group to stop (fixed: the original
            docstring documented a nonexistent ``spider_key`` parameter).
        :return: a ``DeferredList`` that fires once all crawlers in the group
            have stopped, or ``None`` when the group has no crawlers.
        '''
        group = self.group_crawlers.get(group_key, {})
        crawlers = group.get('crawlers', None)
        if crawlers:
            # BUG FIX: the original called .stop() on the crawler *list*
            # itself, which raises AttributeError. Stop each crawler and
            # aggregate the resulting deferreds.
            return defer.DeferredList([crawler.stop() for crawler in crawlers])
        else:
            return None

    def pause_crawlers(self, group_key):
        '''Pause all crawlers in a group. Not implemented yet.'''
        pass

    def resume_crawlers(self, group_key):
        '''Resume all crawlers in a group. Not implemented yet.'''
        pass












