import scrapy
from scrapy_03_duangduang.items import Scrapy03DuangduangItem

class DuangduangSpider(scrapy.Spider):
    """Scrape book image URL, title and price from dangdang.com category pages.

    Crawls up to 100 listing pages of category cp01.01.02.00.00.00, yielding
    one Scrapy03DuangduangItem per book.
    """

    name = "duangduang"
    # Domains the spider is allowed to crawl
    allowed_domains = ["category.dangdang.com"]
    start_urls = ["http://category.dangdang.com/pg1-cp01.01.02.00.00.00.html"]
    baseurl = "http://category.dangdang.com/"
    page = 1  # current page counter, advanced once per parsed page

    def parse(self, response):
        """Parse one listing page: yield an item per book, then request the next page.

        BUG FIX: the next-page request was previously yielded inside the per-book
        loop, so ``self.page`` advanced once per book and a flood of skipped /
        redundant page requests was generated. Pagination now runs exactly once
        per parsed page, after all items on the page have been yielded.
        """
        print("===========================>【开始爬取第"+str(self.page)+"页！】")
        # Each <li> under ul#component_59 is one book entry.
        for item in response.xpath('//ul[@id="component_59"]/li'):
            # Image URL: lazy-loaded images carry the real URL in @data-original;
            # only the first (eagerly loaded) image exposes it via @src.
            # BUG FIX: a SelectorList is never None, so the old `== None` check
            # was dead; test the extracted value for emptiness instead.
            image_url = item.xpath('.//a[@name="itemlist-picture"]/img/@data-original').extract_first()
            if not image_url:
                image_url = item.xpath('.//a[@name="itemlist-picture"]/img/@src').extract_first()
            # Book title and current displayed price.
            book_name = item.xpath('.//p[@name="title"]/a/text()').extract_first()
            price = item.xpath('.//p[@class="price"]/span[@class="search_now_price"]/text()').extract_first()

            yield Scrapy03DuangduangItem(image_url=image_url, name=book_name, price=price)

        # Pagination: one follow-up request per parsed page, up to page 100.
        if self.page < 100:
            self.page += 1
            next_url = self.baseurl + 'pg' + str(self.page) + '-cp01.01.02.00.00.00.html'
            yield scrapy.Request(url=next_url, callback=self.parse)
