import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule


class DoubanSpider(scrapy.Spider):
    """Spider that scrapes book titles from a Douban Read category listing.

    Crawls the single category page in ``start_urls`` and yields one
    ``{"title": <str>}`` item per book title found on the page.
    """

    name = 'douban'
    allowed_domains = ['read.douban.com']
    start_urls = ['https://read.douban.com/category/200']
    # NOTE(review): a `rules` attribute with Rule/LinkExtractor only takes
    # effect on CrawlSpider subclasses, not plain scrapy.Spider — which is
    # presumably why the previous `rules` definition was removed.

    def parse(self, response):
        """Parse a category listing page and yield title items.

        Parameters
        ----------
        response : scrapy.http.Response
            The downloaded category page.

        Yields
        ------
        dict
            ``{"title": <str>}`` for each book title on the page.
        """
        # The trailing /text() is required: without it, getall() returns the
        # serialized `<span>…</span>` markup instead of the title string.
        title_xpath = ('//*[@id="react-root"]/div/section[2]/div[1]'
                       '/ul/li/div/div[2]/h4/a/span/span/text()')
        for title in response.xpath(title_xpath).getall():
            # Lazy %-style args defer formatting until the record is emitted.
            self.logger.info("title: %s", title)
            yield {"title": title}
