#coding=utf-8

import sys
import urlparse
import binascii

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.url import urljoin_rfc
from scrapy.http import Request 

from snailSpider.items import PostItem


def log_info(msg):
	"""Write a progress/debug message to stderr (one line per call)."""
	sys.stderr.write('%s\n' % (msg,))
	
def get_base_url(response):
	"""Return the URL this response was fetched from, used as the base
	for resolving relative hrefs found in the page."""
	fetched_from = response.url
	return fetched_from

# URL -> hit-count maps used to de-duplicate requests across callbacks.
# Module-level, so they are shared by every spider instance in this process
# and persist for its whole lifetime (never pruned).
dup_expert_urls = {}  # expert listing / pagination page URLs
dup_author_urls = {}  # author home-page URLs
dup_post_urls = {}    # post-page and article-list pagination URLs

class CSDNSpider(BaseSpider):
	name = "csdn.net"
	allowed_domains = ["csdn.net"]
	start_urls = [
		#"http://blog.csdn.net/experts.html",
		"http://blog.csdn.net/index.html",
	]

	def __init__(self):
		self.spider_data_file = 'spider_%s_data_file.dat' % self.name
		self.fp = open(self.spider_data_file, 'a')

	# 从start_urls开始抓取的url解析函数统一走parse回调
	# 内部调用parse_experts
	def parse(self, response):
		return self.parse_experts(response)

	# 解析专家页面
	def parse_experts(self, html_response):
		# like yield Request(url, callback=self.parse_item)
		base_url = get_base_url(html_response)
		log_info('get response from %s' % (base_url))
		x = HtmlXPathSelector(html_response)

		# 侧边栏
		nav_links = []
		nav_links = x.select('//div [@class="side_nav"]/ul/li/a/@href').extract()
		for url in nav_links:
			if url in dup_author_urls:
				dup_author_urls[url] += 1
				continue
			dup_author_urls[url] = 1
			if url and not url.startswith('http:'):
				url = urlparse.urljoin(base_url, url)
			log_info('yield nav_page: ' + url )
			yield Request(url, callback=self.parse_experts)

		# 提取作者主页
		url_list = []
		url_list = x.select('//div [@class=\"blog_list\"]/div/span/a[@class=\"user_name\"]/@href').extract()
		for url in url_list:
			if url in dup_author_urls:
				dup_author_urls[url] += 1
				continue
			dup_author_urls[url] = 1
			if url and not url.startswith('http:'):
				#url = urljoin_rfc(base_url, url)
				url = urlparse.urljoin(base_url, url)
			log_info('yield author`s home Request: ' + url)
			yield Request(url, callback=self.parse_author_paper_list)  # 提交作者文章列表页

		# 提取分页信息
		page_links = []
		page_links = x.select("//div[@class=\"page_nav\"]/a/@href").extract()
		for page_url in page_links:
			if page_url in dup_expert_urls:
				dup_expert_urls[page_url] += 1 # count
				continue
			dup_expert_urls[page_url] = 1
			if page_url and not page_url.startswith('http:'):
				page_url = urlparse.urljoin(base_url, page_url)
			log_info('yield expert pageNav Request: ' + page_url)
			yield Request(page_url, callback=self.parse_experts)      # 提交专家分页

	# 解析作者文章列表页面
	def parse_author_paper_list(self, html_response):
		base_url = get_base_url(html_response)
		x = HtmlXPathSelector(html_response)
		post_list = []
		post_list = x.select("//div[@class=\"article_title\"]/h3/span/a/@href").extract()
		for url in post_list:
			if url in dup_post_urls:
				dup_post_urls[url] += 1
				continue
			dup_post_urls[url] = 1
			if url and not url.startswith('http:'):
				#url = urljoin_rfc(base_url, url)
				url = urlparse.urljoin(base_url, url)
			log_info('yield post Request: ' + url + " from " + base_url)
			#yield Request(url, callback=self.parse_detail_info)

		# 提取分页信息
		pages_links = []
		pages_links = x.select("//div[@class=\"pagelist\"]/a/@href").extract()
		for url in pages_links:
			if url and not url.startswith('http:'):
				url = urlparse.urljoin(base_url, url)
			if url in dup_post_urls:
				dup_post_urls[url] += 1
				continue
			dup_post_urls[url] = 1
			log_info('yield post_page Request: ' + url + " from " + base_url)
			#yield Request(url, callback=self.parse_author_paper_list) # 分页信息

	# 具体的文章页面(直接保存，不提取内容)
	def parse_detail_info(self, html_response):
		base_url = get_base_url(html_response)
		log_info('fetch %s success' % base_url)

		x = HtmlXPathSelector(html_response)

		item = PostItem()
		item['url'] = base_url
		item['post_title'] = x.select("//div[@class=\"article_title\"]/h3/span/a/text()").extract()[0]
		#item['post_content'] = x.select("//div[@class=\"article_content\"]").extract()[0]
		item['post_content'] = binascii.hexlify(html_response.body)
		yield item 

	def store_infile(self, item):
		value = '{"source":"blog.csdn.net", "URL":"%s", "CONTENT":"%s"}' % (item['url'], item['post_content'])    # content已经编码
		print 'store to %s url: %s' % (self.spider_data_file, item['url'])
		self.fp.write('%s\n' % value)
		self.fp.flush()
	
