#!/usr/bin/python
# encoding=utf-8

__version__ = '1.2'

"""爬虫入口模块

递归爬取urls时，递归的对象有两种：
1、爬虫策略：递归的urls
2、目标URL策略：目标获取的urls

模块依赖：
依赖kscomm.generic下的threadpool线程池模块

@Knownsec 2009
code by cosine 2010-05
"""

import sys
import re
import time
import socket
import urllib2
import urlparse
import cookielib
from fetch import HttpFetch
from auth import Auth
from parses.htmlparse import HtmlParse
from kscomm.generic.threadpool import ThreadPool

class KsspiderError(Exception):
	"""Raised for spider configuration errors (e.g. unsupported tag names)."""

class Spider:
	"""Recursive web crawler.

	Two URL policies apply independently while recursing:
	1. crawl policy (crawl_scope): which discovered urls are followed further
	2. target-URL policy (url_scope): which discovered urls are reported
	   to the registered callback

	Optionally fans the recursion out over the kscomm.generic.threadpool
	ThreadPool when thread_num > 0.
	"""
	
	def __init__(self,url,entry='',deep=0,urls_limit=0,crawl_scope=1,url_scope=1,url_type=1,crawl_tags=[],url_tags=[],\
	             thread_num=0,timeout=10,sleep=0,referer='',user_agent='',cookies='',proxy='',\
	             includes=[],excludes=[]):
		"""
		Initialize crawl parameters:
		
		deep - crawl depth; default 0 means no crawling
		urls_limit - cap on the total number of collected urls; 0 = unlimited
		
		crawl_scope
		crawl policy:
		1 - resources strictly under the current host
		2 - resources under the current top-level domain (incl. other subdomains)
		3 - all of the above plus external-domain resources
		
		url_scope
		target URL policy:
		1 - resources strictly under the current host
		2 - resources under the current top-level domain (incl. other subdomains)
		3 - all of the above plus external-domain resources
		4 - resources under the current directory
		
		url_type
		target URL type:
		1 - url, http://www.example.com/hi.htm
		2 - site, http://www.example.com/
		
		crawl_tags
		link tag types followed while crawling:
		["a","area","iframe","frame","meta"]
		default: ["a","area","iframe","frame"]
		
		url_tags
		link tag types collected as targets:
		["a","area","iframe","frame","meta","script","form","img","style"]
		default: ["a","area","iframe","frame"]
		
		thread_num - thread pool size; default 0 disables the pool
		timeout - HTTP request timeout
		sleep - pause between HTTP requests, in seconds
		referer - request Referer header
		user_agent - request User-Agent header
		cookies - request cookie session data
		proxy - HTTP proxy, formatted like: http://url:port
		includes - list of urls that must be crawled
		excludes - list of regex rules for urls that must NOT be crawled
		
		"""
		# NOTE(review): the [] defaults above are mutable default arguments
		# shared across calls; they are only read here, never mutated, so
		# this works, but None defaults would be safer.
		self.start_time = time.time()
		# Full sets of supported tag names, used to validate caller input.
		self.crawl_tags = ["a","area","iframe","frame","meta"]
		self.url_tags = ["a","area","iframe","frame","meta","script","form","img","style"]
		
		self.url = self.__add_protocal(url)
		self.entry = entry
		self.deep = deep
		self.urls_limit = urls_limit
		self.crawl_scope = crawl_scope
		self.url_scope = url_scope
		self.url_type = url_type
		
		# Reject any requested tag name outside the supported sets.
		for tag in crawl_tags:
			if tag not in self.crawl_tags:
				err_msg = "crawl tag '"+str(tag)+"' not in "+str(self.crawl_tags)
				raise KsspiderError,err_msg
		for tag in url_tags:
			if tag not in self.url_tags:
				err_msg = "url tag '"+str(tag)+"' not in "+str(self.url_tags)
				raise KsspiderError,err_msg
		
		# Fall back to the default tag sets when none were supplied.
		if not crawl_tags:
			self.crawl_tags = ["a","area","iframe","frame"]
		else:
			self.crawl_tags = crawl_tags
		if not url_tags:
			self.url_tags = ["a","area","iframe","frame"]
		else:
			self.url_tags = url_tags
		# Union of both tag sets: everything the HTML parser must extract.
		self.tags = list(set(self.crawl_tags+self.url_tags))
		
		self.thread_num = thread_num
		if thread_num:
			self.tpool = ThreadPool(thread_num,1)
		else:
			self.tpool = None
		
		self.timeout = timeout
		self.sleep = sleep
		
		self.referer = referer
		self.user_agent = user_agent
		self.cookies = cookies
		self.proxy = proxy
		
		self.includes = includes
		self.excludes = self.__regx_compile(excludes) # list of pre-compiled regex pattern objects
		
		# Callback hook: function plus the argument names it wants passed.
		self.call_func = None
		self.call_argv_list = []
		
		# crawl_history maps url -> depth it was crawled at;
		# callback_url_history lists urls already reported to the callback.
		self.crawl_history = {}
		self.callback_url_history = []
		
		self.auth_handler  = urllib2.BaseHandler()
		self.proxy_handler = urllib2.BaseHandler()
		
		self.set_http_timeout(self.timeout)
		
		self.auth = Auth()
		self.parse = HtmlParse()
		self.http_opener = None
		
	def set_http_timeout(self,timeout):
		"""Set the HTTP request timeout (process-wide socket default)."""
		socket.setdefaulttimeout(timeout)
		
	def set_http_cookies(self,cookies):
		"""Set the cookie session data sent with HTTP requests."""
		self.cookies = cookies
		
	def set_http_referer(self,referer):
		"""eg: http://www.example.com/"""
		self.referer = referer
		
	def set_http_useragent(self,user_agent):
		"""eg: Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 (.NET CLR 3.5.30729)"""
		self.user_agent = user_agent

	def set_headers(self,referer='',user_agent='',cookies=''):
		"""Set the HTTP request headers (Referer, User-Agent, Cookie)."""
		self.referer = referer
		self.user_agent = user_agent
		self.cookies = cookies
		
	def set_http_proxy(self,proxy):
		"""Set an HTTP proxy.  TODO: https"""
		return self.auth.set_http_proxy(proxy)
		
	def set_http_auth(self,auth_url,auth_type,username,password):
		"""Set HTTP authentication: Basic/Digest/NTLM."""
		return self.auth.set_http_auth(auth_url,auth_type,username,password)
			
	def set_form_auth(self,auth_url,auth_field_dict):
		"""Set form-based login authentication."""
		self.set_headers(referer=self.referer,user_agent=self.user_agent,cookies=self.cookies)
		return self.auth.set_form_auth(auth_url,auth_field_dict)
	
	def __set_http_opener(self):
		"""Build and store the HTTP opener from the auth helper."""
		self.http_opener = self.auth.get_http_opener()
		
	def set_callback(self,func,argv_list):
		"""Register the callback run for each collected url.

		argv_list names the arguments the callback wants, chosen from:
		'url', 'deep', 'html', 'headers', 'code', 'content_type'.
		"""
		self.call_func = func
		self.call_argv_list = argv_list
		
	def __run_callback(self,url,deep,html,headers,code):
		"""Invoke the registered callback once per (normalized) url.

		html/headers/code may be None; in that case the page is fetched
		here only if the callback actually asked for those arguments.
		"""
		# Normalize according to url_type and dedupe against history.
		url = self.parse.get_suburl_by_type(url,self.url_type)
		if url not in self.callback_url_history:
			self.callback_url_history.append(url)
		else:
			return
		
		content_type = ''
		# Fetch on demand: only when the callback wants response data and
		# the caller did not already supply it.
		if 'html' in self.call_argv_list or 'headers' in self.call_argv_list or 'code' in self.call_argv_list or 'content_type' in self.call_argv_list:
			if not html and not headers and not code:
				f = HttpFetch()
				f.set_http_opener(self.http_opener)
				f.set_headers(referer=self.referer,user_agent=self.user_agent,cookies=self.cookies)
				f.request(url)
				html = f.get_html()
				headers = f.get_headers()
				code = f.get_code()
				content_type = self.parse.get_content_type(url,headers)
		
		# Pass only the arguments the callback registered for.
		call_argv_dict = {}
		if 'url' in self.call_argv_list:
			call_argv_dict['url'] = url
		if 'deep' in self.call_argv_list:
			call_argv_dict['deep'] = deep
		if 'html' in self.call_argv_list:
			call_argv_dict['html'] = html
		if 'headers' in self.call_argv_list:
			call_argv_dict['headers'] = headers
		if 'code' in self.call_argv_list:
			call_argv_dict['code'] = code
		if 'content_type' in self.call_argv_list:
			call_argv_dict['content_type'] = content_type
			
		self.call_func(call_argv_dict)
		
	def start(self):
		"""Start crawling from the root url; blocks until the pool drains."""
		self.__set_http_opener()
		args = {'url':self.url,'deep':0,'tag':'a'}
		self.__re_crawler(args)
		if self.tpool:
			self.tpool.waitforComplete()
			
		self.stop_time = time.time()
		
	def __re_crawler(self,args):
		"""Recursively crawl one url described by args {url, deep, tag}."""
		url = args['url']
		deep = args['deep']
		tag = args['tag']
		html = headers = code = None
		broken = False
		# Check whether the collected-urls cap has been hit.  The same check
		# is repeated several times below because, when the thread pool is
		# active, the count can change at any moment.
		if self.__enough_urls(): 
			return
		if deep < self.deep:
			f = HttpFetch()
			f.set_http_opener(self.http_opener)
			f.set_headers(referer=self.referer,user_agent=self.user_agent,cookies=self.cookies)
			f.request(url)
			html = f.get_html()
			headers = f.get_headers()
			code = f.get_code()
			broken = f.broken
			
		if self.__enough_urls():
			return
		# When both scopes coincide, the current url itself is a target:
		# reuse the already-fetched response when the normalized form matches.
		if self.crawl_scope == self.url_scope:
			url_format = self.parse.get_suburl_by_type(url,self.url_type)
			if url != url_format:
				if tag in self.url_tags:
					self.__run_callback(url,deep,None,None,None)
			else:
				if tag in self.url_tags:
					self.__run_callback(url,deep,html,headers,code)
		
		if self.__enough_urls():
			return
		if deep < self.deep and not broken:
			suburls = {}
			suburls = self.parse.get_suburls(url,html,tags=self.tags)
			
			crawl_suburls = {} # urls that will be crawled further
			not_crawl_suburls = {} # urls only reported, never crawled
			for tag in suburls:
				if tag in self.crawl_tags:
					crawl_suburls[tag] = suburls[tag]
				else:
					not_crawl_suburls[tag] = suburls[tag]
					
			# Report-only urls: run the callback for those within url_scope.
			for tag in not_crawl_suburls:
				if tag not in self.url_tags:
					continue
				for u in not_crawl_suburls[tag]:
					if self.__enough_urls():
						return
					url_scope_boolean = self.parse.is_suburl_by_scope(u,self.url,self.url_scope)
					if url_scope_boolean:
						self.__run_callback(u,deep,None,None,None)
			
			# Crawlable urls: possibly report, then recurse into in-scope ones.
			for tag in crawl_suburls:
				for u in crawl_suburls[tag]:
					if self.__enough_urls():
						return
					url_scope_boolean = self.parse.is_suburl_by_scope(u,self.url,self.url_scope)
					# When scopes coincide the child is reported at fetch time
					# (see above), so report here only when they differ.
					if url_scope_boolean and self.crawl_scope != self.url_scope:
						if tag in self.url_tags:
							self.__run_callback(u,deep,None,None,None)
					
					if self.__is_exclude_url(u): # skip urls matching the exclude rules
						continue
						
					if self.parse.is_blackext(u): # skip blacklisted file extensions
						continue
											
					# NOTE(review): this skips a re-crawl only when the current
					# depth is SMALLER than the recorded one, i.e. it re-crawls
					# at equal or greater depth -- looks inverted; confirm intent.
					if u in self.crawl_history:
						if deep < self.crawl_history[u]:
							continue
					self.crawl_history[u] = deep
					crawl_scope_boolean = self.parse.is_suburl_by_scope(u,self.url,self.crawl_scope)
					if crawl_scope_boolean:
						if self.tpool:
							self.tpool.putToQueue(self.__re_crawler,url=u,deep=deep+1,tag=tag)
						else:
							args = {'url':u,'deep':deep+1,'tag':tag}
							self.__re_crawler(args)
		else:
			# Depth limit reached or the fetch was broken: stop recursing.
			pass
		
	def __enough_urls(self):
		"""Return True when the collected-urls cap has been reached."""
		if self.urls_limit == 0: # default 0 means unlimited
			return False
		if len(self.callback_url_history) >= self.urls_limit:
			return True
		else:
			return False
		
	def __regx_compile(self,regxs):
		"""Pre-compile a list of regex strings (case-insensitive); bad
		patterns are reported and dropped rather than raised."""
		patterns = []
		for regx in regxs:
			try:
				p = re.compile(regx,re.I)
				patterns.append(p)
			except Exception,e:
				print 'regx compile error: %s'%e
		return patterns
		
	def __is_exclude_url(self,url):
		"""Return True when url matches any compiled exclude pattern."""
		excludes = self.excludes
		for p in excludes:
			try:
				if p.search(url):
					return True
			except Exception,e:
				print 'regx search error: %s'%e
		return False
		
	def __consume_time(self, secs):
		# Format a duration in seconds as HH:MM:SS.
		mins, secs = divmod(secs, 60)
		hours, mins = divmod(mins, 60)
		return '%02d:%02d:%02d' % (hours, mins, secs)
	
	def __add_protocal(self, url):
		"""Normalize: prepend http:// when the url carries no scheme."""
		if not url.lower().startswith(('http://','https://')):
			return 'http://'+url
		return url
		
	def get_time_statistic(self):
		"""Print crawl timing statistics.

		NOTE(review): self.stop_time is only set by start(); calling this
		before start() completes raises AttributeError.
		"""
		print '\n'+('='*37)
		print 'target:',self.url
		print 'start time:',time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.start_time))
		print 'stop time:',time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.stop_time))
		print 'count:',len(self.callback_url_history)
		print 'consumed time:',self.__consume_time(self.stop_time - self.start_time)
		
if __name__ == '__main__':
	# No standalone behavior: point the user at the test programs instead.
	print '"pls run test_programs in ./tests/"\ncosine says.'
	
