import scrapy
from scrapy_dangdang_test.items import ScrapyDangdangTestItem

class DangSpider(scrapy.Spider):
    """Spider that crawls book listings from a dangdang.com category page.

    Yields one ScrapyDangdangTestItem per book (image src, title, price)
    and follows pagination up to page 100.
    """

    name = 'dang'
    # For multi-page crawls, allowed_domains must be broad enough that
    # paginated URLs are not filtered out — normally just the bare domain.
    allowed_domains = ['category.dangdang.com']
    start_urls = ['http://category.dangdang.com/cp01.01.02.00.00.00.html']
    # Pagination URL pattern: base_url + <page> + '-cp01.01.02.00.00.00.html'
    base_url = 'http://category.dangdang.com/pg'
    page = 1

    def parse(self, response):
        """Parse one listing page: yield a book item per <li>, then request the next page.

        :param response: scrapy Response for a category listing page.
        """
        print("==================================头部========================================")
        li_list = response.xpath('//*[@id="component_59"]/li')
        for li in li_list:
            # Lazily-loaded images keep the real URL in @data-original;
            # fall back to @src for the eagerly-loaded first image.
            # (Original code redundantly extracted @data-original twice.)
            src = li.xpath('.//img/@data-original').extract_first()
            if not src:
                src = li.xpath('.//img/@src').extract_first()
            name = li.xpath('.//img/@alt').extract_first()
            price = li.xpath('.//p[@class="price"]/span[1]/text()').extract_first()
            book = ScrapyDangdangTestItem(src=src, name=name, price=price)
            # Hand each book item over to the item pipelines.
            yield book
        print("===================================尾部=======================================")
        # Follow pagination until page 100.
        if self.page < 100:
            self.page = self.page + 1
            url = self.base_url + str(self.page) + '-cp01.01.02.00.00.00.html'
            yield scrapy.Request(url=url, callback=self.parse)
