# -*- coding: utf-8 -*-
import scrapy
import re
from dangdang.items import DangdangItem

class DdSpider(scrapy.Spider):
    """Spider that scrapes book listings from Dangdang search results.

    Starts from a search-results page for the keyword "python" and parses
    every book entry into a :class:`DangdangItem` carrying the title, detail
    URL, cover image, price, author, publication date, publisher, review
    count and short description.
    """
    name = 'dd'
    allowed_domains = ['dangdang.com']
    start_urls = ['http://search.dangdang.com/?key=python&act=input&page_index=1']

    def parse(self, response):
        """Parse one search-results page and yield one item per book.

        :param response: Scrapy ``Response`` for a search-results page.
        :yields: ``DangdangItem`` instances, one per ``<li>`` in ``ul.bigimg``.
        """
        # Each book occupies one <li> inside the big-image result list.
        for vo in response.css("ul.bigimg li"):
            item = DangdangItem()
            # Book title (title attribute of the cover-image link).
            item['title'] = vo.css("a.pic::attr(title)").extract_first()
            # Detail-page URL.
            item['url'] = vo.css("p.name>a::attr(href)").extract_first()
            # Cover image: most covers are lazy-loaded via data-original,
            # but the first items on a page carry the real URL in src —
            # fall back so those covers are not lost as None.
            item['pic'] = (vo.css("a.pic>img::attr(data-original)").extract_first()
                           or vo.css("a.pic>img::attr(src)").extract_first())
            # Current (discounted) price as displayed on the page.
            item['price'] = vo.css("p.price>span.search_now_price::text").extract_first()
            # First listed author (some entries have no author link, so
            # this may be None).
            item['author'] = vo.xpath(
                ".//p[@class='search_book_author']/span/a[@name='itemlist-author'][1]/@title"
            ).extract_first()
            # Publication date (second <span> in the author line).
            item['date'] = vo.xpath(
                ".//p[@class='search_book_author']/span[2]/text()"
            ).extract_first()
            # Publisher name.
            item['company'] = vo.css(
                "p.search_book_author>span>a[name='P_cbs']::text"
            ).extract_first()
            # Review/comment count.
            item['comment'] = vo.css("p.search_star_line>a::text").extract_first()
            # Short description (absent on a few entries, then None).
            item['information'] = vo.css("p.detail::text").extract_first()

            yield item

            



            
