import scrapy

from scrapy_dangdang_34.items import ScrapyDangdang34Item


class DangdangSpider(scrapy.Spider):
    """Crawl book listings (cover image, title, price) from one dangdang.com
    category, following the numbered pagination pages.

    Yields one ``ScrapyDangdang34Item`` per product and one ``scrapy.Request``
    per follow-up page; items are consumed by the project pipelines.
    """

    name = "dangdang"
    allowed_domains = ["category.dangdang.com"]
    start_urls = ["http://category.dangdang.com/cp01.21.03.03.00.00.html"]

    # Pages after the first follow the pattern:
    #   http://category.dangdang.com/pg<N>-cp01.21.03.03.00.00.html
    base_url = 'http://category.dangdang.com/pg'
    page = 1

    def parse(self, response):
        """Extract one item per <li> product entry, then schedule the next page."""
        # Each product is an <li> inside the search-results container.
        for li in response.xpath('//div[@id="search_nature_rg"]//li'):
            # Lazily-loaded images keep the real URL in @data-original;
            # images already loaded expose it in @src instead.
            src = (li.xpath('./a/img/@data-original').extract_first()
                   or li.xpath('./a/img/@src').extract_first())
            # The page serves protocol-relative URLs ("//img...") — add a scheme.
            src = 'http:' + src

            name = li.xpath('./a/img/@alt').extract_first()
            price = li.xpath('./p[@class="price"]/span[@class="search_now_price"]/text()').extract_first()

            # Hand the populated item off to the pipelines.
            yield ScrapyDangdang34Item(src=src, name=name, price=price)

        # Follow pagination; 100 is the site's maximum page count.
        if self.page < 100:
            self.page += 1
            url = self.base_url + str(self.page) + '-cp01.21.03.03.00.00.html'
            yield scrapy.Request(url=url, callback=self.parse)