import scrapy
from zhihuBook.items import ZhihubookItem
# 导入RedisSpider模块
from scrapy_redis.spiders import RedisSpider

class BookSpider(RedisSpider):
    """Distributed Douban book Top-250 spider.

    Unlike a plain Spider, a RedisSpider does not hard-code ``start_urls``:
    the seed URL is pushed into Redis manually (e.g.
    ``LPUSH book:start_urls https://book.douban.com/top250?start=0``) and
    every worker machine pops requests from that shared queue, so several
    crawler instances can cooperate on the same job and funnel their
    results back through Redis.
    """

    name = "book"
    allowed_domains = ["douban.com"]
    # Redis key holding the seed URL(s); the value pushed into Redis
    # can be anything, but the key name must match this attribute.
    redis_key = 'book:start_urls'

    def parse(self, response):
        """Fan out requests for all 10 listing pages.

        The Top-250 list shows 25 books per page, addressed by
        ``start=0, 25, ..., 225``.
        """
        for page in range(10):
            yield scrapy.Request(
                url="https://book.douban.com/top250?start={}".format(page * 25),
                callback=self.book_parse,
            )

    def book_parse(self, response):
        """Parse one listing page and yield one item per book row.

        Fixes vs. the original:
        - A fresh ``ZhihubookItem`` is created for every row. Previously a
          single item instance was built once and yielded repeatedly, so
          asynchronous pipelines could observe the same (last-written)
          field values for every book on the page.
        - ``extract_first(default="")`` guards against rows where an XPath
          matches nothing, which previously raised ``AttributeError`` on
          ``None.strip()``.
        """
        for table in response.xpath("//*[@id='content']/div/div[1]/div/table"):
            item = ZhihubookItem()
            item["book_name"] = table.xpath("./tr/td[2]/div[1]/a/text()").extract_first(default="").strip()
            item["book_intruduct"] = table.xpath("./tr/td[2]/p[1]/text()").extract_first(default="").strip()
            item["book_url"] = table.xpath("./tr/td[2]/div[1]/a/@href").extract_first(default="").strip()
            item["book_img"] = table.xpath("./tr/td[1]/a/img/@src").extract_first(default="").strip()
            yield item
