import scrapy
from scrapy_ddw.items import ScrapyDDWItem

class DdwSpider(scrapy.Spider):
    """Spider for Dangdang e-books: scrapes title, cover image, author and
    price from the classic-Chinese-fiction listing, ordered by sales."""

    name = "ddw"
    allowed_domains = ["e.dangdang.com"]
    start_urls = ["https://e.dangdang.com/classification_list_page.html?category=ZGJDDXS&dimension=dd_sale&order=0"]

    def parse(self, response):
        """Parse the category listing page and yield one item per book.

        Each ``<a>`` under ``div#book_list`` is a single book entry.
        Fields are extracted as strings with ``.get()`` (returns ``None``
        when the node is missing) rather than stored as raw SelectorList
        objects, so exported items contain usable data.
        """
        for entry in response.xpath('//div[@id="book_list"]/a'):
            # img[2] carries the real cover; assumes img[1] is a lazy-load
            # placeholder — TODO confirm against the live page markup.
            cover = entry.xpath('./span[@class="bookcover"]/img[2]')
            item = ScrapyDDWItem(
                book=cover.xpath('./@alt').get(),
                image=cover.xpath('./@src').get(),
                # /text() so the fields hold values, not HTML fragments.
                author=entry.xpath(
                    './/div[@class="bookinfo"]/div[@class="author"]/text()').get(),
                price=entry.xpath('./div/div[@class="price"]/span[1]/text()').get(),
            )
            # Hand each populated item to the configured item pipelines.
            yield item
