
from scrapy_plus.conf import settings

if settings.ASYNC_TYPE == "thread":
    from multiprocessing.dummy import Pool
elif settings.ASYNC_TYPE == "coroutine":
    from scrapy_plus.async.coroutine import Pool
else:
    raise Exception('不支持{}异步方式'.format(settings.ASYNC_TYPE))



from .scheduler import Scheduler
from .downloader import Downloader

from scrapy_plus.https.request import Request
from scrapy_plus.utils.log import logger
from datetime import datetime

import importlib
import time
from scrapy_plus.utils.collector import NormalStatsCollector, ReidsStatsCollector

class Engine():
    """Core engine of the framework.

    Wires the four main components together -- spiders, scheduler,
    downloader and pipelines -- plus the spider/downloader middleware
    chains, and drives the asynchronous request/response loop until all
    non-incremental spiders are exhausted.
    """

    def __init__(self):
        """Instantiate every component from the configuration in ``settings``."""
        # Stats backend: redis-backed counters when running distributed,
        # plain in-process counters otherwise.
        if settings.IS_DISTRIBUTE:
            self.collector = ReidsStatsCollector()
        else:
            self.collector = NormalStatsCollector()

        # The scheduler shares the collector so it can count duplicates.
        self.scheduler = Scheduler(self.collector)

        # Spiders are keyed by name so each response can be routed back to
        # the spider that issued the originating request.
        self.spiders = self._auto_import_instances(settings.SPIDERS, isspider=True)
        self.pipelines = self._auto_import_instances(settings.PIPELINES)
        self.downloader = Downloader()

        # Middleware chains, applied in configuration order.
        self.spider_mids = self._auto_import_instances(settings.SPIDERS_MIDDLEWARES)
        self.downloader_mids = self._auto_import_instances(settings.DOWNLOADER_MIDDLEWARES)

        # Pool class (thread or coroutine) was selected at import time from
        # settings.ASYNC_TYPE.
        self.pool = Pool()

        # Flag checked by _callback to stop re-submitting work on shutdown.
        self.is_running = True

    def _auto_import_instances(self, path=None, isspider=False):
        """Dynamically import and instantiate the classes listed in *path*.

        path: iterable of dotted paths such as ``"pkg.module.ClassName"``.
              Defaults to no paths; a fresh container is built on every
              call (the previous ``path=[]`` default was a shared mutable
              default argument).
        isspider: when True return ``{spider.name: spider_instance}``
                  because responses are looked up by spider name;
                  otherwise return a plain list of instances.
        """
        instances = {} if isspider else []
        for dotted in (path or []):
            # Split "pkg.module.ClassName" into module path and class name.
            module_name, cls_name = dotted.rsplit(".", 1)
            module = importlib.import_module(module_name)
            cls = getattr(module, cls_name)

            if isspider:
                instances[cls.name] = cls()
            else:
                instances.append(cls())
        return instances

    def _start_requests(self):
        """Feed every spider's start requests into the scheduler, one async
        task per spider."""

        def _enqueue(spider_name, spider):
            # 1. spider --request--> engine
            for request in spider.start_requests():
                # Run the request through the spider middleware chain.
                for spider_mid in self.spider_mids:
                    request = spider_mid.process_request(request)

                # Tag the request with its spider so the response can be
                # dispatched back to the right parse method.
                request.spider_name = spider_name

                # 2. engine --request--> scheduler
                self.scheduler.add_request(request)
                self.collector.incr(self.collector.request_nums_key)

        for spider_name, spider in self.spiders.items():
            # Each spider enqueues its initial requests asynchronously.
            self.pool.apply_async(_enqueue, args=(spider_name, spider))

    def execute_request_response_item(self):
        """Process one request end-to-end: dequeue, download, parse, and
        route each result to the scheduler (new request) or the pipelines
        (item)."""
        # 3. scheduler --request--> engine
        request = self.scheduler.get_request()

        # Queue drained (or timed out): nothing to do this round.
        if request is None:
            return

        # Downloader middleware, request side.
        for downloader_mid in self.downloader_mids:
            request = downloader_mid.process_request(request)

        # 4.1 engine --request--> downloader
        # 4.2 downloader --response--> engine
        response = self.downloader.get_response(request)

        # Propagate request.meta so parse callbacks can pass data along.
        response.meta = request.meta

        # Downloader middleware, response side.
        for downloader_mid in self.downloader_mids:
            response = downloader_mid.process_response(response)

        # Spider middleware, response side.
        for spider_mid in self.spider_mids:
            response = spider_mid.process_response(response)

        # 5. engine --response--> spider: resolve the parse callback named
        # on the request (string attribute) on the owning spider.
        spider = self.spiders[request.spider_name]
        parse = getattr(spider, request.parse)
        results = parse(response)

        for result in results:
            # 6. Route each yielded result by type.
            if isinstance(result, Request):
                # New request: back through the spider middleware chain,
                # then to the scheduler.
                for spider_mid in self.spider_mids:
                    result = spider_mid.process_request(result)
                self.scheduler.add_request(result)
                self.collector.incr(self.collector.request_nums_key)
            else:
                # Item: hand to every configured pipeline in order.
                for pipeline in self.pipelines:
                    pipeline.process_item(result, spider)

        # Count the completed response (used by the termination check).
        self.collector.incr(self.collector.response_nums_key)

    def _callback(self, item):
        """Pool callback: keep the worker slot busy by re-submitting the
        processing task until the engine is shut down."""
        if self.is_running:
            self.pool.apply_async(self.execute_request_response_item, callback=self._callback,
                                  error_callback=self._error_back)

    def _error_back(self, e):
        """Pool error callback: log the exception with its traceback.

        Re-raising inside a try/except restores an active exception
        context so ``logger.exception`` can record the full traceback.
        """
        try:
            raise e
        except Exception as e:
            logger.exception(e)

    def _start(self):
        """Kick off the initial requests and run the processing loop until
        the termination condition is met."""
        # Enqueue every spider's start requests asynchronously.
        self.pool.apply_async(self._start_requests, error_callback=self._error_back)

        # Fixed concurrency: each seed task re-submits itself via _callback.
        for _ in range(2):
            self.pool.apply_async(self.execute_request_response_item, callback=self._callback,
                                  error_callback=self._error_back)

        # Sum of the spiders' timed_task flags: zero means no incremental
        # (timed) spider is configured, so the engine may terminate.
        sum_task = sum([spider.timed_task for spider in self.spiders.values()])

        while True:
            # Yield briefly so worker tasks make progress.
            time.sleep(0.001)

            # Terminate only for non-incremental runs: once at least one
            # request was issued and every request is accounted for as
            # either a response or a filtered duplicate.
            if sum_task == 0:
                if self.collector.request_nums != 0:
                    if self.collector.response_nums + self.collector.repeat_request_nums >= self.collector.request_nums:
                        self.is_running = False
                        break
        self.pool.close()
        self.pool.join()

    def start(self):
        """Public entry point: run the crawl and log summary statistics."""
        start_time = datetime.now()

        self._start()

        end_time = datetime.now()
        logger.info('是否启用分布式:{}'.format(settings.IS_DISTRIBUTE))
        logger.info('异步的方式是:{}'.format(settings.ASYNC_TYPE))
        logger.info('爬虫开启时间:{}'.format(start_time))
        logger.info('爬虫结束时间:{}'.format(end_time))
        logger.info('总的请求个数:{}'.format(self.collector.request_nums))
        logger.info('总的重复个数:{}'.format(self.collector.repeat_request_nums))
        logger.info('总的响应个数:{}'.format(self.collector.response_nums))
        logger.info('爬虫耗时时间:{}'.format((end_time - start_time).total_seconds()))

        # Reset the (possibly redis-backed) counters for the next run.
        self.collector.clear()
























