# -*- coding: utf-8 -*-
import scrapy
from dangdang.items import SearchItem

class SearchSpider(scrapy.Spider):
    """Crawl Dangdang book-search result pages for the keyword 'python'.

    Starts at page 1 and follows pagination up to page 5, yielding one
    SearchItem per book listing with picture, title, detail, price,
    author, publisher and publication time.
    """

    name = 'search'
    allowed_domains = ['search.dangdang.com']
    start_urls = ['http://search.dangdang.com/?key=python&page_index=1']
    # Current page counter; incremented once per parsed page. Works because
    # pages are requested strictly sequentially (each page schedules the next).
    p = 1

    def parse(self, response):
        """Extract a SearchItem per listing, then schedule the next page.

        :param response: the search-result page response
        :yields: populated ``SearchItem`` objects and the follow-up
            ``scrapy.Request`` for the next page (pages 2..5)
        """
        for blist in response.css("div.shoplist li"):
            # BUG FIX: a fresh item must be created for each listing.
            # The original built one SearchItem before the loop and mutated
            # it on every iteration, so all yielded references pointed to
            # the same object holding only the last listing's data.
            item = SearchItem()
            # Lazy-loaded images carry the real URL in data-original;
            # whichever attribute matches first is used.
            item['pic'] = blist.css(
                "a.pic img::attr('src'),a.pic img::attr('data-original')"
            ).extract_first()
            item['title'] = blist.css("a.pic::attr('title')").extract_first()
            item['detail'] = blist.css("p.detail::text").extract_first()
            item['price'] = blist.css(
                "p.price span.search_now_price::text"
            ).extract_first()
            item['author'] = blist.css(
                "p.search_book_author span a::attr('title')"
            ).extract_first()
            # Publisher and publish date lack stable CSS hooks on this page,
            # so fall back to regexes over the listing's raw HTML.
            item['publish'] = blist.re_first('a.*?name="P_cbs".*?>(.*?)</a>')
            item['time'] = blist.re_first("<span> /(.*?)</span>")
            # Use the spider's logger instead of print() so output goes
            # through Scrapy's logging system at a controllable level.
            self.logger.debug('Scraped item: %s', item)
            yield item

        # Follow pagination, stopping after page 5.
        self.p += 1
        if self.p <= 5:
            next_url = 'http://search.dangdang.com/?key=python&page_index=' + str(self.p)
            url = response.urljoin(next_url)
            yield scrapy.Request(url=url, callback=self.parse)
