#!/usr/bin/env python
# Created: 2019-01-11
__author__ = '749B'

import queue
import types

from twisted.web.client import getPage, defer
from twisted.internet import reactor


class Request(object):
    """A URL to fetch, paired with the callback that handles its response."""

    def __init__(self, url, callback=None):
        # A callback of None makes the engine fall back to Spider.parse.
        self.callback = callback
        self.url = url


class Scheduler(object):
    """FIFO request scheduler backed by a plain queue.Queue."""

    def __init__(self, engine):
        self.q = queue.Queue()
        self.engine = engine

    def enqueue_request(self, request):
        """Append *request* to the back of the queue."""
        self.q.put(request)

    def next_request(self):
        """Pop and return the oldest request, or None when the queue is empty."""
        try:
            return self.q.get(block=False)
        except queue.Empty:
            return None

    def size(self):
        """Number of requests currently waiting in the queue."""
        return self.q.qsize()


class ExecutionEngine(object):
    """Engine: pulls requests off the scheduler, downloads them with a
    bounded level of concurrency, and routes results to spider callbacks."""

    def __init__(self):
        self._close_wait = None  # Deferred fired once all work is done (set in start())
        self.start_requests = None  # iterator of seed Requests; drained once then None
        self.scheduler = Scheduler(self)
        self.in_progress = set()  # requests currently being downloaded
        self.spider = None  # bound in open_spider()

    def _next_request(self):
        # Drain any remaining seed requests into the scheduler.
        while self.start_requests:
            request = next(self.start_requests, None)
            if request:
                self.scheduler.enqueue_request(request)
            else:
                self.start_requests = None
        # Launch downloads until the concurrency cap (5) is reached or the
        # queue is empty.
        while len(self.in_progress) < 5 and self.scheduler.size() > 0:
            request = self.scheduler.next_request()
            if not request:
                break
            self.in_progress.add(request)
            d = getPage(bytes(request.url, encoding='utf-8'))
            # addCallback fires on success, addErrback on failure;
            # addBoth fires in either case.
            d.addBoth(self._handle_downloader_output, request)
            d.addBoth(lambda x, req: self.in_progress.remove(req), request)
            d.addBoth(lambda x: self._next_request())
        # Nothing in flight and nothing queued: the crawl is finished.
        if len(self.in_progress) == 0 and self.scheduler.size() == 0:
            # Guard against start() never having been called and against
            # firing the Deferred twice (AlreadyCalledError).
            if self._close_wait is not None and not self._close_wait.called:
                self._close_wait.callback(None)

    def _handle_downloader_output(self, body, request):
        """Wrap the downloaded body in a Response and invoke the callback.

        If the callback is a generator, every yielded object is re-queued
        as a new request.
        """
        response = Response(body, request)
        # Fall back to the spider's parse() when no callback was given.
        func = request.callback or self.spider.parse
        gen = func(response)
        if isinstance(gen, types.GeneratorType):
            for req in gen:
                # Could further distinguish here: Request objects keep
                # crawling, item objects would go to a pipeline.
                self.scheduler.enqueue_request(req)

    @defer.inlineCallbacks
    def open_spider(self, spider, start_requests):
        """Bind the spider, stash its seed requests and kick off crawling."""
        self.start_requests = start_requests
        self.spider = spider
        yield None
        reactor.callLater(0, self._next_request)  # run once the reactor starts

    @defer.inlineCallbacks
    def start(self):
        """Return a Deferred that fires only when the whole crawl is done."""
        self._close_wait = defer.Deferred()
        yield self._close_wait


class Response(object):
    """Downloaded page body paired with the Request that produced it."""

    def __init__(self, body, request):
        self.request = request
        self.url = request.url
        self.body = body

    @property
    def text(self):
        """The raw body decoded as UTF-8 text."""
        return self.body.decode('utf-8')


class Crawler(object):
    """Ties one spider class to one engine and drives a single crawl."""

    def __init__(self, spider_cls):
        self.spider = None
        self.engine = None
        self.spider_cls = spider_cls

    @defer.inlineCallbacks
    def crawl(self):
        """Create engine and spider, open the spider, then wait for the
        engine's completion Deferred."""
        self.engine = ExecutionEngine()
        self.spider = self.spider_cls()
        seed_requests = iter(self.spider.start_requests())
        yield self.engine.open_spider(self.spider, seed_requests)
        yield self.engine.start()


class CrawlerProcess(object):
    """Runs several crawlers concurrently on a single reactor."""

    def __init__(self):
        self.crawlers = set()
        self._active = set()  # Deferreds of crawls still in flight

    def crawl(self, spider_cls, *args, **kwargs):
        """Create a Crawler for *spider_cls*, start it and track its Deferred."""
        crawler = Crawler(spider_cls)
        self.crawlers.add(crawler)
        deferred = crawler.crawl(*args, **kwargs)
        self._active.add(deferred)
        return deferred

    def start(self):
        """Run the reactor until every active crawl has finished."""
        all_done = defer.DeferredList(self._active)
        all_done.addBoth(self._stop_reactor)
        reactor.run()

    @classmethod
    def _stop_reactor(cls, _=None):
        """Stop the reactor once every crawl Deferred has fired.

        DeferredList always passes a result argument to its callback, so it
        is accepted (and ignored) via the defaulted placeholder.
        """
        print("All Done")
        reactor.stop()


class Spider(object):
    """Base spider: one Request per start URL, with a body-dumping parse()."""

    def __init__(self):
        # Subclasses normally declare start_urls at class level; default to
        # an empty list so start_requests() is always safe to call.
        if not hasattr(self, 'start_urls'):
            self.start_urls = []

    def start_requests(self):
        """Yield one Request (with the default callback) per start URL."""
        yield from (Request(url) for url in self.start_urls)

    def parse(self, response):
        """Default callback: print the raw response body."""
        print(response.body)


class ChoutiSpider(Spider):
    """Prints the first line of the chouti homepage containing <title>."""

    name = "chouti"
    start_urls = ["http://dig.chouti.com"]

    def parse(self, response):
        lines = response.text.split('\n')
        title_line = next(line for line in lines if "<title>" in line)
        print(title_line)


class BingSpider(Spider):
    """Spider for the Bing homepage; relies on the base-class parse()."""

    name = "bing"
    start_urls = ["http://www.bing.com"]


class BaiduSpider(Spider):
    """Spider for the Baidu homepage; relies on the base-class parse()."""

    name = "baidu"
    start_urls = ["http://www.baidu.com"]


if __name__ == '__main__':
    # Run all three spiders concurrently on one reactor.
    process = CrawlerProcess()
    for cls in (ChoutiSpider, BingSpider, BaiduSpider):
        process.crawl(cls)
    process.start()

