# -*- coding: utf-8 -*-
import scrapy
from dangdangwang.items import DangdangwangItem


class DangdangSpider(scrapy.Spider):
    """Crawl Dangdang's search results for 'python' books.

    Follows every result link on the search page and scrapes each book's
    detail page into a ``DangdangwangItem``.
    """

    name = 'dangdang'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://search.dangdang.com/?key=python&act=input&page_index=1']

    def parse(self, response):
        """Extract detail-page URLs from the result list and follow each one.

        Yields a ``scrapy.Request`` per book, handled by ``parse_detail``.
        ``dont_filter=True`` keeps duplicate URLs from being dropped.
        """
        urls = response.xpath("//ul[@id='component_59']/li/a/@href").getall()
        for url in urls:
            yield scrapy.Request(url, callback=self.parse_detail, dont_filter=True)

    def parse_detail(self, response):
        """Parse a single book detail page and yield one populated item."""
        item = DangdangwangItem()
        # Book title
        item['name'] = response.xpath("//div[@class='name_info']/h1/@title").get()
        # Short description; strip surrounding whitespace when present
        book_desc = response.xpath("//div[@class='name_info']/h2/span/@title").get()
        item['desc'] = book_desc.strip() if book_desc is not None else None
        # Author: join all text nodes, then drop the leading '作者:' label.
        # FIX: original expression was "////span[...]" — four slashes is
        # invalid XPath and raises an evaluation error at runtime.
        # getall() always returns a list (possibly empty), never None,
        # so no None-check is needed before joining.
        author_parts = response.xpath("//span[@id='author']//text()").getall()
        item['author'] = ''.join(author_parts).split('作者:')[-1]
        # Publisher
        item['pub'] = response.xpath("//div[@class='messbox_info']/span[2]/a/text()").get()
        # Publication date; strip whitespace and the '出版时间:' label when present
        pub_date = response.xpath("//div[@class='messbox_info']/span[3]/text()").get()
        if pub_date is not None:
            pub_date = pub_date.strip().split('出版时间:')[-1]
        item['pub_date'] = pub_date
        # Price: concatenate the price text nodes into a single string
        price_parts = response.xpath("//p[@id='dd-price']/text()").getall()
        item['price'] = ''.join(price_parts).strip()
        # Source URL of this detail page
        item['url'] = response.url
        yield item

