# -*- coding: utf-8 -*-
import scrapy
from dangdang.items import DangdangItem


class DangdangspiderSpider(scrapy.Spider):
	"""Spider that scrapes python-book search results from search.dangdang.com.

	Crawls result pages 1..pageNum sequentially, yielding one DangdangItem
	per book listing.
	"""
	name = 'dangdangSpider'
	allowed_domains = ['search.dangdang.com']
	# Start from the first results page.
	start_urls = ['http://search.dangdang.com/?key=python&act=input&page_index=1']

	def __init__(self, page_index=1, pageNum=None, *args, **kwargs):
		"""Initialize the spider.

		page_index: first page to fetch. Scrapy ``-a`` arguments arrive as
			strings, so it is coerced to int here (the original code kept it
			as-is, which made ``self.page_index + 1`` in parse() a TypeError
			when passed from the command line).
		pageNum: total number of pages to crawl. When not supplied, falls
			back to the original interactive prompt so existing usage keeps
			working; passing ``-a pageNum=5`` allows non-interactive runs.
		"""
		# Let scrapy.Spider handle its own kwargs (name, custom settings, ...).
		super().__init__(*args, **kwargs)
		self.page_index = int(page_index)
		if pageNum is None:
			# Preserve the original interactive behavior when no argument given.
			pageNum = input('请输入需要获取的页数：')
		self.pageNum = int(pageNum)
		self.url = ('http://search.dangdang.com/?key=python&act=input&page_index='
			+ str(self.page_index))

	def parse(self, response):
		"""Extract every book on one results page, then request the next page.

		Yields DangdangItem instances, followed by (at most) one
		scrapy.Request for the next page until pageNum pages are done.
		"""
		bookList = response.css('div.shoplist li')

		for i in bookList:
			items = DangdangItem()
			items['bookName'] = i.css('a.pic::attr(title)').extract_first()
			items['bookUrl'] = i.css('a.pic::attr(href)').extract_first()
			# Cover image is lazy-loaded, hence the data-original attribute.
			items['bookImg'] = i.css('a.pic img::attr(data-original)').extract_first()
			items['bookTitle'] = i.css('p.name a::attr(title)').extract_first()
			items['bookWriter'] = i.css('p.search_book_author span:nth-child(1) a::text').extract_first()
			items['bookPrice'] = i.css('p.price .search_now_price::text').extract_first()
			yield items

		# Advance the page counter; stop once pageNum pages have been crawled.
		self.page_index = self.page_index + 1
		if self.page_index <= self.pageNum:
			print('*'*12,'第',self.page_index,'页','*'*12)
			self.url = ('http://search.dangdang.com/?key=python&act=input&page_index='
				+ str(self.page_index))
			yield scrapy.Request(self.url, callback=self.parse)



