import scrapy
from dangdang.items import DangdangItem

class DangSpider(scrapy.Spider):
    """Crawl dangdang.com search results for the keyword PYTHON.

    Scrapes each book's cover image, title, author, price, and publish
    date from the first two result pages, yielding one DangdangItem
    per book and then a Request for the next page.
    """

    name = 'dang'
    # Must match the host of start_urls; 'search.dang.com' was a typo that
    # would make OffsiteMiddleware drop the follow-up page requests
    # (masked only by dont_filter=True below).
    allowed_domains = ['search.dangdang.com']
    start_urls = ['http://search.dangdang.com/?key=PYTHON&act=input&page_index=1']
    p = 1  # current search-result page index (class-level counter)

    def parse(self, response):
        """Extract every book listed on one search-result page.

        :param response: scrapy Response for a dangdang search page
        :yields: a DangdangItem per book, then a Request for the next
                 page while ``self.p`` stays below 3 (i.e. pages 1-2).
        """
        for book in response.selector.css("ul.bigimg li"):
            item = DangdangItem()
            item['pic'] = book.css("a.pic img::attr(src)").extract_first()
            # Lazy-loaded covers carry a placeholder src; the real image
            # URL is in the data-original attribute instead.
            if item['pic'] == 'images/model/guan/url_none.png':
                item['pic'] = book.css("a.pic img::attr(data-original)").extract_first()
            item['name'] = book.css("p.name a::attr(title)").extract_first()
            item['author'] = book.css("p.search_book_author a::attr(title)").extract_first()
            item['price'] = book.css("p.price span.search_now_price::text").extract_first()
            # Publish date sits in a "<span> /YYYY-MM-DD</span>" fragment.
            item['time'] = book.re_first("<span> /(.*?)</span>")
            yield item

        self.p += 1
        if self.p < 3:
            next_url = ("http://search.dangdang.com/?key=PYTHON&act=input&page_index="
                        + str(self.p))
            yield scrapy.Request(url=response.urljoin(next_url),
                                 callback=self.parse, dont_filter=True)
