#!/usr/bin/env python
#coding=utf-8

from bs4 import BeautifulSoup
import urlparse

from CSDN_Items import post_detail_item

def url_uniform(base_url, url):
	if not base_url or not url:
		return None
	if not url.startswith('http:'):
		url = urlparse.urljoin(base_url, url)
	return url

def print_list(l):
	for i in l:
		print i

def urls_uniform(base_url, new_links):
	"""Normalize every URL in *new_links* against *base_url*, dropping
	duplicates and unusable entries.

	First-occurrence order is preserved.  Returns a new list.
	"""
	seen = set()  # dedup filter (the old code abused a dict for this)
	uniform = []
	for url in new_links:
		url = url_uniform(base_url, url)  # relative URI -> absolute URL
		# BUG FIX: url_uniform returns None for empty input; previously a
		# single None leaked into the result and was "submitted" downstream.
		if url is None or url in seen:
			continue
		seen.add(url)
		uniform.append(url)
	return uniform
	
# 提交新链
def submit_new_links_default(base_url, new_links):
	if not new_links or len(new_links) <= 0:
		return

	# submit
	print 'use submit_new_links_default'
	print 'base_url:', base_url
	for url in new_links:
		print ' ', url

# 文章详情内容处理
def submit_post_info_default(post_info):
	if not post_info:
		return
	print 'use submit_post_info_default '
	#print post_info
	
	
# Tag filter for the expert page.  We want:
#   * the recommended-experts list (author home pages)
#   * author links inside the "latest articles" section
#   * the pagination block
def csdn_is_expert_page_need_tag(tag):
	"""bs4 find_all filter: True for tags the expert-page extractor needs."""
	name = tag.name
	cls = tag.get('class')
	first_cls = cls[0] if cls else None

	# Recommended experts: <ul class="list_3" id="experts">
	if name == 'ul' and first_cls == 'list_3' and tag.get('id') == 'experts':
		return True

	# Latest articles: <a class="user_name" href=...>
	if name == 'a' and first_cls == 'user_name' and tag.has_attr('href'):
		return True

	# Pagination: <div class="page_nav">
	if name == 'div' and first_cls == 'page_nav':
		return True

	return False

# Extract from an expert page:
#   author links / author home-page links
#   pagination links
def csdn_extract_expert_page(base_url, html_doc, dict=None, submit_new_links_cb=None, submit_post_info_cb=None):
	"""Parse an expert page and hand all discovered links to the
	submit callback (or the printing default)."""
	collected = []

	soup = BeautifulSoup(html_doc)
	for tag in soup.find_all(csdn_is_expert_page_need_tag):
		cls = tag.get('class')
		first_cls = cls[0] if cls else None

		# Recommended experts: every <a> inside links to an author home page
		if tag.name == 'ul' and first_cls == 'list_3' and tag.get('id') == 'experts':
			collected.extend(a.get('href') for a in tag.find_all('a'))
			continue

		# Latest articles: the tag itself is the author home-page link
		if tag.name == 'a' and first_cls == 'user_name' and tag.has_attr('href'):
			collected.append(tag.get('href'))
			continue

		# Pagination block: collect every page link
		if tag.name == 'div' and first_cls == 'page_nav':
			collected.extend(a.get('href') for a in tag.find_all('a'))
			continue

	# Submit the new links (absolute, deduplicated)
	links = urls_uniform(base_url, collected)
	if submit_new_links_cb:
		submit_new_links_cb(base_url, links)
	else:
		submit_new_links_default(base_url, links)


# Tag filter for an author's post-list page.
def csdn_is_author_post_list_page_need_tag(tag):
	"""bs4 find_all filter: keep the post-title wrapper divs
	(<div class="article_title">) and the pagination block
	(<div class="pagelist>")."""
	if tag.name != 'div':
		return False
	cls = tag.get('class')
	first_cls = cls[0] if cls else None
	return first_cls in ('article_title', 'pagelist')

def csdn_extract_author_post_list_page(base_url, html_doc, dict=None, submit_new_links_cb=None, submit_post_info_cb=None):
	"""Parse an author's post-list page: collect post-detail links and
	pagination links, then hand them to the submit callback."""
	collected = []
	soup = BeautifulSoup(html_doc)
	for tag in soup.find_all(csdn_is_author_post_list_page_need_tag):
		cls = tag.get('class')
		first_cls = cls[0] if cls else None

		# Post-title wrapper: its first <a> points at the detail page
		if tag.name == 'div' and first_cls == 'article_title':
			collected.append(tag.find('a').get('href'))
			continue

		# Pagination block: every <a> is another list page
		if tag.name == 'div' and first_cls == 'pagelist':
			collected.extend(a.get('href') for a in tag.find_all('a'))
			continue

	# Submit the new links (absolute, deduplicated)
	links = urls_uniform(base_url, collected)
	if submit_new_links_cb:
		submit_new_links_cb(base_url, links)
	else:
		submit_new_links_default(base_url, links)

def csdn_is_author_post_detail_page_need_tag(tag):
	"""bs4 find_all filter for a post-detail page.

	Keeps the author-info div, the title/category/date spans, and the
	article-content div.
	"""
	# Author info (author name + home page)
	if tag.name == 'div' and (tag.has_attr('id') and tag['id'] == 'blog_userface'):
		return True

	# Title / categories / publish date all follow the same span pattern
	if tag.name == 'span' and tag.has_attr('class'):
		if tag['class'][0] in ('link_title', 'link_categories', 'link_postdate'):
			return True

	# Read count ('link_view' span) is intentionally not extracted yet.

	# Article body
	if (tag.name == 'div'
			and (tag.has_attr('class') and tag['class'][0] == 'article_content')
			and (tag.has_attr('id') and tag['id'] == 'article_content')):
		return True

	# BUG FIX: the function previously fell off the end and returned None
	# for non-matching tags; return an explicit False like its siblings.
	return False

def csdn_extract_author_post_detail_page(base_url, html_doc, dict=None, submit_new_links_cb=None, submit_post_info_cb=None):
	"""Parse a post-detail page into a post_detail_item and submit it.

	Fields extracted: author name/home page, title, categories, publish
	date and article body.  The new-links submit path is kept for
	symmetry with the other extractors even though detail pages
	currently contribute no links.
	"""
	new_links = []

	post_info = post_detail_item()

	soup = BeautifulSoup(html_doc)
	for tag in soup.find_all(csdn_is_author_post_detail_page_need_tag):
		# Author info (author name + home page)
		if tag.name == 'div' and (tag.has_attr('id') and tag['id'] == 'blog_userface'):
			author_anchor = tag.find('span').find('a')
			post_info.add('author_name', author_anchor.get_text())
			post_info.add('home_page', author_anchor.get('href'))
			continue

		# Post title (concatenate the anchor's stripped strings)
		if tag.name == 'span' and (tag.has_attr('class') and tag['class'][0] == 'link_title'):
			post_title = "".join(tag.find_all('a')[0].stripped_strings)
			post_info.add('post_title', post_title)
			continue

		# Categories
		if tag.name == 'span' and (tag.has_attr('class') and tag['class'][0] == 'link_categories'):
			post_tags = ""
			# BUG FIX: the loop variable used to shadow the builtin 'str'
			for word in tag.find_all('a')[0].stripped_strings:
				post_tags += " " + word
			post_info.add('post_tags', post_tags)
			continue

		# Publish date
		if tag.name == 'span' and (tag.has_attr('class') and tag['class'][0] == 'link_postdate'):
			post_info.add('post_date', tag.string)
			continue

		# Read count ('link_view' span) is intentionally not extracted yet.

		# Article body
		if (tag.name == 'div'
				and (tag.has_attr('class') and tag['class'][0] == 'article_content')
				and (tag.has_attr('id') and tag['id'] == 'article_content')):
			post_info.add('post_content', tag.get_text())
			continue

	if len(post_info) > 0:
		post_info.add('post_url', base_url)

	# Hand the extracted post to the consumer
	if submit_post_info_cb:
		submit_post_info_cb(post_info)
	else:
		submit_post_info_default(post_info)

	# Submit new links
	uniform_new_links = urls_uniform(base_url, new_links)
	if submit_new_links_cb:
		submit_new_links_cb(base_url, uniform_new_links)
	else:
		submit_new_links_default(base_url, uniform_new_links)


# Decide the page type from the URL (or an explicit override in *d*).
def csdn_page_type(url, html_doc, d=None):
	'''
	Returns one of:
	"CSDN_PAGE_TYPE_UNKNOWN"
	"CSDN_PAGE_TYPE_EXPERT"
	"CSDN_PAGE_TYPE_POST_LIST"
	"CSDN_PAGE_TYPE_POST_DETAIL"
	"CSDN_PAGE_TYPE_AUTHOR_HOME"

	An explicit 'CSDN_PAGE_TYPE' entry in *d* wins over URL sniffing.
	'''
	if d and 'CSDN_PAGE_TYPE' in d:
		return d['CSDN_PAGE_TYPE']

	if 'experts' in url:
		return 'CSDN_PAGE_TYPE_EXPERT'
	if 'detail' in url:
		return 'CSDN_PAGE_TYPE_POST_DETAIL'
	return 'CSDN_PAGE_TYPE_POST_LIST'

def csdn_extract_main(url, html_doc, dict=None, submit_new_links_cb=submit_new_links_default, submit_post_info_cb=submit_post_info_default):
	result = None
	page_type = csdn_page_type(url, html_doc, dict)
	if page_type == "CSDN_PAGE_TYPE_EXPERT":
		print 'CSDN_PAGE_TYPE_EXPERT'
		csdn_extract_expert_page(url, html_doc, dict, submit_new_links_cb, submit_post_info_cb)
	elif page_type == "CSDN_PAGE_TYPE_POST_LIST":
		print 'CSDN_PAGE_TYPE_POST_LIST'
		csdn_extract_author_post_list_page(url, html_doc, dict, submit_new_links_cb, submit_post_info_cb)
	elif page_type == "CSDN_PAGE_TYPE_POST_DETAIL":
		print 'CSDN_PAGE_TYPE_POST_DETAIL'
		csdn_extract_author_post_detail_page(url, html_doc, dict, submit_new_links_cb, submit_post_info_cb)
	else:
		print >>sys.stderr, "Unknown page type: %s" % page_type

	return None

def submit_new_links_to_redis_test(base_url, new_links_list):
        if not new_links_list or len(new_links_list) <= 0:
                print 'submit_new_links_to_redis_test no new links'
                return

        new_links_redislist_name = 'new_links_redislist'
	print 'submit base: %s %d sub_urls' % (base_url, len(new_links_list))
	global r
        for new_url in new_links_list:
		print 'submit %s' % new_url
                r.lpush(new_links_redislist_name, new_url)      # FIFO, TODO: 实现优先级, JSON

if __name__ == "__main__":
	import urllib2
	# Manual test drive: force a page type via the dict override, then
	# fetch a live CSDN page and run the extractor against it.
	dict = {'CSDN_PAGE_TYPE': 'CSDN_PAGE_TYPE_EXPERT'}
	url = 'http://blog.csdn.net/experts.html?page=2'
	# Alternative scenarios (uncomment one pair to test other page types):
	#dict = {'CSDN_PAGE_TYPE': 'CSDN_PAGE_TYPE_POST_LIST'}
	#url = 'http://blog.csdn.net/tankles'
	#dict = {'CSDN_PAGE_TYPE': 'CSDN_PAGE_TYPE_POST_DETAIL'}
	#url = 'http://blog.csdn.net/tankles/article/details/7663311'
	#url = 'http://blog.csdn.net/ex_net/article/details/8744089'
	# download page
	request = urllib2.Request(url)
	request.add_header('User-Agent', 'spider-test-client')
	response = urllib2.urlopen(request)
	html_doc = response.read()
	# process: discovered links are pushed into a local Redis instance
	# (requires a redis-server listening on 127.0.0.1:6379)
	import redis
	r = redis.StrictRedis(host='127.0.0.1', port=6379)
	submit_new_links_cb = submit_new_links_to_redis_test
	submit_post_info_cb = None
	#csdn_extract_main(url, html_doc, dict)
	csdn_extract_main(url, html_doc, dict, submit_new_links_cb, submit_post_info_cb)


