# -*- coding: utf-8 -*-
# Read book-tag links from Redis, crawl each tag's listing pages page by
# page, and store the per-book URLs back into Redis (via the item pipeline).
import scrapy
import redis
from master.items import MasterItem
from urllib.parse import quote


class BookSpider(scrapy.Spider):
    """Master-side spider for a distributed Douban book crawler.

    Pops tag links from the Redis list ``book:tag_url``, requests each
    tag's paginated book listing, and yields a :class:`MasterItem` holding
    the detail-page URL of every book found (the pipeline is expected to
    push these back into Redis for the slave crawlers).
    """

    name = 'book'
    allowed_domains = ['book.douban.com']
    base_url = 'https://book.douban.com/'

    def start_requests(self):
        """Drain the ``book:tag_url`` Redis list and request each tag page.

        Each popped value is percent-encoded and appended to ``base_url``
        (presumably a tag path such as ``tag/…`` — verify against the
        producer that fills the list).
        """
        r = redis.Redis(
            host=self.settings.get("REDIS_HOST"),
            port=self.settings.get("REDIS_PORT"),
            decode_responses=True,
        )
        # NOTE: the original `while r.llen(...): tag = r.lpop(...)` pattern is
        # racy when several consumers share the queue: lpop() can return None
        # even though llen() was non-zero a moment earlier, and quote(None)
        # would then raise TypeError.  Testing the popped value itself makes
        # draining the list safe regardless of concurrent consumers.
        while True:
            tag = r.lpop("book:tag_url")
            if tag is None:
                break  # queue exhausted
            url = self.base_url + quote(tag)
            # dont_filter=True: tag pages must be re-fetched even if seen
            # in a previous run of the dedup filter.
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Yield one item per book link on this listing page, then follow
        the "next page" link (if present) with the same callback."""
        book_urls = response.css(
            '#subject_list ul li.subject-item a.nbg::attr(href)'
        ).extract()
        for book_url in book_urls:
            item = MasterItem()
            item["url"] = book_url  # detail-page URL of a single book
            yield item
        # Pagination: the "next" control holds a relative href; absolutize
        # it against the current response URL before requesting.
        next_href = response.css("span.next a::attr(href)").extract_first()
        if next_href:
            yield scrapy.Request(url=response.urljoin(next_href),
                                 callback=self.parse)

