# -*- coding: utf-8 -*-
import scrapy
from dangdang.items import DangdangItem


class DanglistSpider(scrapy.Spider):
    """Spider that scrapes Dangdang search results for 'python' books.

    Walks result pages 1..52, yielding one ``DangdangItem`` per book with
    title, detail blurb, price, publisher, cover image URL and author(s).
    """
    name = 'danglist'
    allowed_domains = ['search.dangdang.com']
    start_urls = ['http://search.dangdang.com/?key=python&act=input']

    # Current result-page index; pagination stops before page 53.
    p = 1

    def parse(self, response):
        # Each <li> under ul.bigimg is one book entry in the result list.
        for book in response.css("ul.bigimg li"):
            # Instantiate a fresh item per book. Reusing one shared item
            # across yields (as before) lets later iterations overwrite the
            # object earlier yields already handed to the pipeline.
            item = DangdangItem()
            item['title'] = book.css("a::attr(title)").extract_first()
            item['detail'] = book.css("p.detail::text").extract_first()
            item['price'] = book.css("span.search_pre_price::text").extract_first()
            item['pic'] = book.css("a.pic img::attr(data-original)").extract_first()

            # The author paragraph's links are: author(s)..., publisher.
            links = book.css("p.search_book_author span a::text").extract()
            # Guard against an empty list — bare [-1] would raise IndexError.
            item['publish'] = links[-1] if links else None

            auth = links[:-1]
            if not auth:
                # No author links at all — fall back to the plain span text.
                # (The original compared the list to None, which never
                # matched, so this fallback was dead code.)
                fallback = book.css("p.search_book_author span::text").extract_first()
                auth = [fallback] if fallback else []
            # Multiple authors arrive as a list; join into a single string.
            item['author'] = "".join(auth)
            yield item

        # Advance to the next result page until page 52 inclusive.
        self.p += 1
        if self.p < 53:
            print(str(self.p) + ("+" * 70))
            next_url = 'http://search.dangdang.com/?key=python&act=input&page_index=' + str(self.p)
            yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse)
