import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


# CrawlSpider with an integrated link extractor
class AppSpider(CrawlSpider):
    """Crawl Douban's latest-books listing and print each book page's title.

    The LinkExtractor pulls subject-page URLs out of every crawled response;
    the Rule routes each matched page to ``parse_item`` and, with
    ``follow=True``, keeps extracting links from those pages too (Scrapy's
    built-in request de-duplication prevents revisiting the same URL).
    """

    name = "app2"
    # allowed_domains = ["book.douban.com"]
    start_urls = ["https://book.douban.com/latest"]

    rules = (
        # Rule: parses every page whose URL the link extractor matched,
        # using the callback named below.
        # LinkExtractor: extracts matching URLs from crawled responses.
        # allow: bare subject pages, e.g. https://book.douban.com/subject/123
        # deny: drop sub-pages under a subject (e.g. /subject/123/comments)
        Rule(LinkExtractor(allow=r'https://book.douban.com/subject/\d+',
                           deny=r'https://book.douban.com/subject/\d+/.+'),
             callback='parse_item', follow=True),
        # follow=True continues extracting links from matched pages so the
        # whole site section gets covered.
    )

    def parse_item(self, response):
        """Print the URL and book title of a matched subject page.

        NOTE(review): prints for debugging only and yields no items —
        confirm whether an Item/dict should be yielded into a pipeline.
        """
        print(response.url)
        # The book title sits in <h1><span> under the #wrapper element.
        print(response.xpath('//*[@id="wrapper"]/h1/span/text()').get())


