import scrapy
from scrapy_dd.items import ScrapyDdItem


# 下载多页的数据
# Spider that crawls multiple pages of the Dangdang e-book sale list.
class DdSpider(scrapy.Spider):
    """Crawl the Dangdang e-book sale listing and yield one item per book.

    Pages 2..10 are scheduled from ``parse`` itself, reusing the same
    callback because every listing page shares the same layout.
    """

    name = "dd"
    allowed_domains = ["e.dangdang.com"]
    # URLs ending in .html do not need a trailing slash.
    start_urls = ["http://e.dangdang.com/list-QCWX-dd_sale-0-1.html"]

    # Page number is appended to this prefix to build follow-up page URLs.
    base_urls = "http://e.dangdang.com/list-QCWX-dd_sale-0-"
    # Current page counter, mutated across parse() callbacks (pages 1..10).
    page = 1

    def parse(self, response):
        """Extract (src, name, price) for each book on the page, yield a
        ``ScrapyDdItem`` per book, then schedule the next page (up to 10).

        :param response: Scrapy response for one listing page.
        """
        name_list = response.xpath('//div[@class="title"]/text()')
        price_list = response.xpath('//span[@class="now"]/text()')
        # The second <img> inside the cover span holds the real image URL.
        src_list = response.xpath('//span[@class="bookcover"]/img[2]/@src')

        # zip stops at the shortest selector list, so a book with a missing
        # field cannot raise IndexError as the old index-based loop could.
        for name_sel, price_sel, src_sel in zip(name_list, price_list, src_list):
            name = name_sel.extract()
            price = price_sel.extract()
            src = src_sel.extract()

            # Hand each populated item to the item pipeline.
            yield ScrapyDdItem(src=src, name=name, price=price)

        # Later pages use the same layout, so the callback is parse itself.
        if self.page < 10:
            self.page = self.page + 1
            url = self.base_urls + str(self.page) + ".html"
            yield scrapy.Request(url=url, callback=self.parse)
