# _*_ coding:utf-8 _*_
"""
引擎模块
负责协调各个模块之间的关系
1.框架启动
2.框架运行流程

设计思路
1实现各个模块的初始化
2启动爬虫

1.2版本  增加下载中间件和爬虫中间件
1.3版本  增加日志功能


"""
from datetime import datetime

from ..core.pipeline import Pipeline
from ..core.scheduler import Scheduler
from ..core.downloder import Downloder
from ..core.spider import Spider
from ..http.request import Request
from ..items import Item
from ..middlewares.downlode_middlewares import DownloadMiddlewares
from ..middlewares.spider_middlewares import SpiderMiddlewares

from ..utils.log import logger

class Engine(object):
    """Framework engine: wires the core modules together and drives one
    crawl cycle (spider -> scheduler -> downloader -> spider -> pipeline),
    passing requests/responses through the spider and download middlewares.
    """

    def __init__(self):
        # Instantiate the four core modules...
        self.pipeline = Pipeline()
        self.scheduler = Scheduler()
        self.downloder = Downloder()
        self.spider = Spider()
        # ...and the two middleware chains (downloader- and spider-side).
        self.downlode_middlewares = DownloadMiddlewares()
        self.spider_middlewares = SpiderMiddlewares()

    def start(self):
        """Public entry point.

        Wraps the private `_start` core logic (kept separate for
        extensibility) with start/end/elapsed-time logging.
        """
        start_time = datetime.now()
        logger.info('起始时间:{}'.format(start_time))
        self._start()
        end_time = datetime.now()
        logger.info("结束时间:{}".format(end_time))
        logger.info('总耗时:{}'.format((end_time - start_time).total_seconds()))

    def _start(self):
        """Run one crawl cycle through all modules and middlewares.

        Raises:
            Exception: if the spider's parse() returns something that is
                neither a Request nor an Item.
        """
        # 1. Ask the spider for the initial request object.
        request = self.spider.start_requests()
        # 1.1 Spider middleware processes the request on its way to the scheduler.
        request = self.spider_middlewares.process_request(request)

        # 2. Hand the request to the scheduler.
        self.scheduler.add_request(request)

        # 3. Pull the next request back out of the scheduler.
        request = self.scheduler.get_request()
        # 1.2 Download middleware processes the outgoing request.
        request = self.downlode_middlewares.process_request(request)

        # 4. Give the request to the downloader and obtain a response.
        response = self.downloder.get_response(request)

        # 1.3 Download middleware processes the response.
        response = self.downlode_middlewares.process_response(response)

        # 1.4 Spider middleware processes the response.
        response = self.spider_middlewares.process_response(response)

        # 5. Let the spider parse the response.
        result = self.spider.parse(response)

        # 6. Dispatch on what the spider returned:
        # 6.1 A new Request goes back to the scheduler.
        if isinstance(result, Request):
            # 1.5 Spider middleware processes the NEW request before scheduling.
            # BUG FIX: previously passed the stale `request` variable here,
            # re-processing the old request and scheduling the wrong object.
            result = self.spider_middlewares.process_request(result)
            self.scheduler.add_request(result)

        # 6.2 An Item goes to the pipeline.
        elif isinstance(result, Item):
            self.pipeline.process_item(result)

        else:
            raise Exception('爬虫返回数据只能是数据或请求')
