import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapy_readbook_36.items import ScrapyReadbook36Item


class ReadbookSpider(CrawlSpider):
    """Crawl dushu.com book-list pages and yield one item per book cover image."""

    name = "readbook"
    allowed_domains = ["www.dushu.com"]
    # The natural start page is https://www.dushu.com/book/1188.html, but the
    # rule below only matches URLs of the form 1188_<number>.html, which that
    # URL doesn't satisfy. Since 1188_1.html also serves page one, start there
    # so the rule extracts data from the first page as well.
    start_urls = ["https://www.dushu.com/book/1188_1.html"]

    # follow=True keeps following pagination links discovered on every fetched
    # page: any single page only shows ~14 page links, so without following,
    # later pages would never be reached.
    rules = (Rule(LinkExtractor(allow=r"/book/1188_\d+\.html"), callback="parse_item", follow=True),)

    def parse_item(self, response):
        """Extract the book name and cover-image URL from one list page.

        Yields one ScrapyReadbook36Item per <img> inside the book list.
        The images are lazy-loaded, so the real URL lives in @data-original
        rather than @src; the book title is in @alt.
        """
        for img in response.xpath('//div[@class="bookslist"]//img'):
            name = img.xpath('./@alt').extract_first()
            src = img.xpath('./@data-original').extract_first()
            # Each yielded item is routed to the configured item pipelines.
            yield ScrapyReadbook36Item(name=name, src=src)
        # NOTE: the original ended with dead template code (`item = {}` /
        # `return item`); in a generator the return value is discarded, so it
        # has been removed.
