# -*- coding: utf-8 -*-
import scrapy
from dangdang.items import BookItem


class BookSpider(scrapy.Spider):
    """Crawl dangdang.com search results for 'python' books.

    Walks every result page (following the "next" pagination link) and
    yields one ``BookItem`` per listing with title, cover image, price,
    author, publish time, press (publisher) and description.
    """

    name = 'book'
    allowed_domains = ['search.dangdang.com']
    # NOTE(review): 'act=inputt' looks like a typo for 'act=input' — confirm
    # against the live site before changing; left as-is to preserve behavior.
    start_urls = ['http://search.dangdang.com/?key=python&act=inputt&page_index=1']

    def parse(self, response):
        """Parse one search-results page; yield BookItems and the next-page Request."""
        # Each <li> under ul.bigimg is one book listing.
        for dd in response.css("ul.bigimg li"):
            item = BookItem()
            item['title'] = dd.css("a::attr(title)").extract_first()

            # Lazily-loaded covers carry a placeholder in `src` and the real
            # URL in `data-original`; run the src selector once instead of twice.
            src = dd.css("a img::attr(src)").extract_first()
            if src != "images/model/guan/url_none.png":
                item['pic'] = src
            else:
                item['pic'] = dd.css("a img::attr(data-original)").extract_first()

            item['price'] = dd.re_first('<span class="search_now_price">(.*?)</span>')

            # Author is the first linked name, press (publisher) the last.
            # extract() can be empty if the markup varies; the previous
            # extract()[0] / extract()[-1] raised IndexError in that case.
            links = dd.css("p.search_book_author span a::attr(title)").extract()
            item['author'] = links[0] if links else None
            item['press'] = links[-1] if links else None

            item['time'] = dd.re_first("<span> /(.*?)</span>")
            item['content'] = dd.css("p.detail::text").extract_first()
            yield item

        # Follow pagination until no "next" link remains.
        next_url = response.css("li.next a::attr(href)").extract_first()
        if next_url is not None:
            yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse)

