# coding=UTF-8
# author=suemi
# created at 16/5/15
from scrapy.spiders import Spider
from scrapy.http import Request


class ArticleSpider(Spider):
    """
    Base class for breadth-first news crawlers.

    Subclasses must implement :meth:`category`, :meth:`generate` and
    :meth:`candidates`; :meth:`parse` then dispatches every fetched
    response either to item extraction (article pages) or to link
    discovery (everything else).
    """

    # NOTE(review): class-level mutable set — shared by every instance and
    # subclass of ArticleSpider. parse() no longer touches it; kept only so
    # existing subclasses that reference `self.crawled` keep working.
    crawled = set()

    def category(self, url):
        """
        Decide whether *url* points to a news (article) page.

        :param url: URL string of the fetched page.
        :return: truthy if the URL is an article page, falsy otherwise.
        :raises NotImplementedError: if the subclass does not override it.
        """
        # Raising here (instead of silently returning None) surfaces a
        # missing override immediately rather than as a cryptic TypeError
        # later in parse().
        raise NotImplementedError("subclasses must implement category()")

    def generate(self, response):
        """
        Extract an Article object from a news page.

        :param response: Scrapy response of an article page.
        :return: the extracted Article item.
        :raises NotImplementedError: if the subclass does not override it.
        """
        raise NotImplementedError("subclasses must implement generate()")

    def candidates(self, response):
        """
        Extract the next URLs to crawl from the fetched page.

        :param response: Scrapy response of a non-article page.
        :return: an iterable of URL strings to schedule next.
        :raises NotImplementedError: if the subclass does not override it.
        """
        raise NotImplementedError("subclasses must implement candidates()")

    def parse(self, response):
        """
        Dispatch a response.

        Article pages (per :meth:`category`) yield the extracted item;
        all other pages yield a follow-up :class:`Request` for every
        candidate URL.

        :param response: the Scrapy response being processed.
        """
        if self.category(response.url):
            yield self.generate(response)
        else:
            # Scrapy's built-in duplicate filter handles already-seen URLs,
            # so no manual bookkeeping is done here.
            for url in self.candidates(response):
                yield Request(url)
