# -*- coding: utf-8 -*-
import scrapy,sys,os
reload(sys).setdefaultencoding('UTF-8')
from njupt.items import CrcaItem
import logging
from pyquery import PyQuery as pq


class crcaSpider(scrapy.Spider):
		name = "crca"
		allowed_domains = ["www.crca.com.cn"]
		'''start_urls = [
				"http://www.crca.com.cn/list.aspx?cid=20&page=1",
				]'''
		
		def start_requests(self):
				for i in range(1,101):
					next_page_url = "http://www.crca.com.cn/list.aspx?cid=20&page="+str(i)
					yield scrapy.Request(next_page_url,callback=self.parse)

		def parse(self,response):
			if response.status == 200:
				body = response.body
				doc = pq(body)
				for link in doc('.mm tr').find('a'):
					item = CrcaItem()
					item['title'] = pq(link).text()
					request = scrapy.Request('http://www.crca.com.cn/'+ pq(link).attr('href'),callback=self.parse_content)
					request.meta['item'] = item
					yield request

		def parse_content(self,response):
			if response.status == 200: 
				body = response.body.decode(response.encoding).encode('utf-8')
				doc = pq(body)
				footerBody = body[body.find("选自"):]
				book = footerBody[footerBody.find("《"):footerBody.find("》")+3]
				item = response.meta['item']
				item['book'] = book
				item['url'] = response.url
				logging.info("标题:%s,选自:%s" %(doc('.aTitle').text(),book))
				yield item
				#logging.info("标题:%s,选自:%s" %(doc('.aTitle').text(),book))

