import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapy_readbook_36new.items import ScrapyReadbook36NewItem


class ReadbookSpider(CrawlSpider):
    """Crawl book listings from dushu.com category 1175 and yield one item per book.

    Each yielded ``ScrapyReadbook36NewItem`` carries the book's name, cover
    image URL and author string scraped from a listing page.
    """

    name = "readbook"
    allowed_domains = ["www.dushu.com"]
    start_urls = ["https://www.dushu.com/book/1175_1.html"]

    # CrawlSpider rule: the LinkExtractor regex matches paginated listing
    # URLs (/book/1175_<page>.html). Set follow=True to keep following
    # pagination links and crawl every page; follow=False crawls only the
    # pages linked from the start URL.
    rules = (Rule(LinkExtractor(allow=r"/book/1175_\d+\.html"), callback="parse_item", follow=False),)

    def parse_item(self, response):
        """Parse one listing page and yield an item for every book entry.

        :param response: the listing-page response delivered by the rule above.
        :yields: ScrapyReadbook36NewItem with ``name``, ``src`` and ``author``.
        """
        book_list = response.xpath('//div[@class="book-info"]')
        for book in book_list:
            name = book.xpath('.//img/@alt').extract_first()
            # The site lazy-loads covers: the real URL normally lives in
            # @data-original. Fall back to @src so images rendered without
            # the lazy-load attribute don't come back as None.
            src = (book.xpath('.//img/@data-original').extract_first()
                   or book.xpath('.//img/@src').extract_first())
            author = book.xpath('.//p[1]/text()').extract_first()

            yield ScrapyReadbook36NewItem(name=name, src=src, author=author)

