#!/usr/bin/python
#coding=utf-8
u"""爬虫的基类，主要提供网络访问/功能入口/信号处理/通用函数等的定义和封装
"""
import logging
import cookielib
import urllib2
import urllib
import urlparse
import re
import sys
import datetime
import socket
##from lxml import etree
import htmlentitydefs
import os
from math import ceil
##import time
import signal
import errno
from threading import Event

# http deflate/gzip support
from gzip import GzipFile
from io import BytesIO
import zlib
import httplib


# Root-logger configuration shared by every crawler subclass.
# %(funcName)s is part of the format, so wrapped methods must keep their
# real names for the log output to stay meaningful.
logging.basicConfig(level=logging.DEBUG,
##									format='%(thread)d %(asctime)s %(funcName)s %(message)s',
##									format='%(asctime)s %(name)s %(levelname)s %(funcName)s %(message)s',
									format='%(funcName)s %(message)s',
                  datefmt= '%H:%M:%S')

# Module-level exit event: installed by PostGetter.setSignalHandler() and
# set by signalHandler() when SIGTERM arrives.
EXITEVENT = None
def signalHandler(sig, stackframe):
	u'''Process-level signal handler.

	SIGUSR1 is only logged (a liveness ping); SIGTERM sets the module-level
	EXITEVENT (when one has been installed) so worker loops can shut down.
	'''
	# EXITEVENT is only read here, never rebound, so no `global` is needed.
	banner = '*' * 20
	if sig == signal.SIGUSR1: # do nothing
		logging.info('\n\n%s received signal USR1 %s\n\n', banner, banner)
	elif sig == signal.SIGTERM:
		logging.info('\n\n%s received signal TERM, set exit flag ... %s\n\n', banner, banner)
		if not EXITEVENT:
			logging.info('EXITEVENT is None!')
		else:
			EXITEVENT.set()

def deflate(data):
	'''Decompress an HTTP "deflate" response body.

	Most servers send raw DEFLATE data (RFC 1951, no zlib header), which
	needs negative wbits; a few send a real zlib stream (RFC 1950), so
	retry with default settings when the raw attempt fails.
	'''
	try:
		return zlib.decompress(data, -zlib.MAX_WBITS)
	except zlib.error:
		pass
	return zlib.decompress(data)

class ContentEncodingProcessor(urllib2.BaseHandler):
	'''urllib2 handler that requests and transparently decodes gzip/deflate.'''

	def http_request(self, req):
		# Advertise compressed transfer support on every outgoing request.
		req.add_header('Accept-Encoding', 'gzip, deflate')
		return req

	def http_response(self, req, resp):
		# Decode the body according to Content-Encoding; anything else
		# (or no encoding at all) passes through untouched.
		encoding = resp.headers.get('content-encoding')
		if encoding == 'gzip':
			body = GzipFile(fileobj=BytesIO(resp.read()), mode='r')
		elif encoding == 'deflate':
			body = BytesIO( deflate(resp.read()) )
		else:
			return resp
		# Re-wrap so the decoded stream still looks like an http response.
		wrapped = urllib2.addinfourl(body, resp.headers, resp.url, resp.code)
		wrapped.msg = resp.msg
		return wrapped

	# HTTPS compression handling is identical to HTTP.
	https_request = http_request
	https_response = http_response


def htmlentitydecode(s):
	u"""Decode named, decimal and hex HTML entities in *s* to unicode.

	Based on http://snipplr.com/view/15261/python-decode-and-strip-html-entites-to-unicode/
	(see also http://mail.python.org/pipermail/python-list/2007-June/443813.html).
	"""
	def _named(match):
		# Unknown entity names degrade to a single space instead of raising.
		codepoint = htmlentitydefs.name2codepoint.get(match.group(1))
		return unichr(codepoint) if codepoint is not None else u" "

	# Named entities such as &eacute; -- the alternation lists every known name.
	result = re.sub(u'&(%s);' % u'|'.join(htmlentitydefs.name2codepoint), _named, s)
	# Decimal entities such as &#233;
	result = re.sub(u'&#(\d+);', lambda m: unichr(int(m.group(1))), result)
	# Hexadecimal entities such as &#x00E9;
	return re.sub(u'&#x(\w+);', lambda m: unichr(int(m.group(1), 16)), result)


def chkLogin(func):
	u'''Decorator for PostGetter-style methods: log in before running *func*.

	When self._signed() is false, self._login() is attempted first; if that
	also fails the wrapped call is skipped and None is returned.
	'''
	# Local import keeps the module's top-level import block untouched.
	from functools import wraps

	# Preserve func's __name__/__doc__ on the wrapper; without this, every
	# decorated method logs as "wrappedFunc" under the %(funcName)s format.
	@wraps(func)
	def wrappedFunc(self, *args, **kwargs):
		if not self._signed():
			if not self._login():
				return None
		return func(self, *args, **kwargs)
	return wrappedFunc



class myHTTPDefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
	'''Default HTTP error handler that raises only for 404.

	For any other error code the method falls through and returns None,
	which suppresses the HTTPError the base class would raise.
	NOTE(review): presumably intentional -- _getResponse handles a None
	result from opener.open() -- but confirm non-404 errors really should
	be swallowed here.
	'''
	def http_error_default(self, req, fp, code, msg, hdrs):
		if code == 404:
			raise urllib2.HTTPError(req.get_full_url(), 404, '~~~ 404 ~~~', hdrs, fp)
##			logger=logging.getLogger(self.__class__.__name__)
##			logger.debug('access %s got %s:%s',req.get_full_url(),code,msg)
##			return fp

class myRedirectHandler(urllib2.HTTPRedirectHandler):
	'''Redirect handler overridden to NOT follow 301/302 responses.

	Both handlers deliberately return None instead of issuing the follow-up
	request the base class would send.
	'''

	def http_error_301(self, req, fp, code, msg, headers):
		# Do nothing: leave the redirect unhandled.
		return None

	# 302 gets exactly the same (non-)treatment as 301.
	http_error_302 = http_error_301


class PostGetter(object):
	'''Base class for site-specific BBS crawler classes.

	Provides cookie-backed urllib2 openers (direct and through a local
	proxy), gzip/deflate decoding, retried network access with detailed
	error classification, login bookkeeping, signal-driven shutdown, and
	pickle support.  Subclasses must implement login(); doWork() also
	expects a getPostList() method that this base class does not define.
	'''
	socket_timeout = 20 # default per-request timeout, seconds
	socket_trytimes = 3 # default retry count per URL
	# Pseudo "HTTP codes" used to report low-level socket failures through
	# the same code channel as real HTTP status codes.
	SOCKET_CONN_TIMED_OUT = 12345
	SOCKET_RECV_TIMED_OUT = 12346
	HTTPLIB_BAD_STATUS_LINE = 12347
	dft_html_encoding = 'gb2312' # default encoding assumed for HTML pages
	dft_img_encoding = 'utf-8' # default encoding assumed for image URLs
	# Pre-compiled patterns for stripping/unwrapping fetched HTML fragments.
	premovetag = re.compile('(<.*?>)', re.M|re.S)
	exclude_first_div_tag = re.compile(r'\A<div.*?>(.*?)</div>\Z', re.M|re.S)
	exclude_first_td_tag = re.compile(r'\A<td.*?>\s*(.*?)\s*</td>\Z', re.M|re.S)
	exclude_first_comment_tag = re.compile(r'\A<!-- .*?-->(.*?)<!-- .*?-->\Z', re.M|re.S)

	def __init__(self, cookie_file):
		u'''cookie_file: path of the LWP cookie jar to restore and persist.'''
		super(PostGetter,self).__init__()
		self.cookie_file = cookie_file

		self._signed_in = False
		self.cj = cookielib.LWPCookieJar()
		try:
			# Restoring a previous cookie jar implies we are already logged in.
			self.cj.revert(cookie_file)
			self._signed_in = True
		except:
			# NOTE(review): bare except with a no-op "None" expression (i.e.
			# "pass") silently ignores any failure to load the cookie file.
			None
		self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj),
			myHTTPDefaultErrorHandler,
			ContentEncodingProcessor)
		self.opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3')]
##		urllib2.install_opener(self.opener) # replace the default opener with ours
		# Second opener that routes through a local HTTP proxy on 127.0.0.1:8580.
		self.proxy_opener = urllib2.build_opener(urllib2.ProxyHandler({'http':'http://127.0.0.1:8580'}),
				urllib2.HTTPCookieProcessor(self.cj),
				myHTTPDefaultErrorHandler,
				ContentEncodingProcessor)
		self.proxy_opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3')]

		self.logger = logging.getLogger(self.__class__.__name__)

		self.exitevent = Event()
##		self.setSignalHandler()


	def _tryDecode(self, data):
		u'''Try to decode *data* with common Chinese/Unicode encodings.

		Returns a unicode string on success; when no codec fits, logs the
		problem and returns *data* unchanged (a single stray byte is mapped
		to its code point first).
		'''
		for x in ('utf-8', 'gb18030', 'gb2312', 'big5'):
			try:
				d = data.decode(x)
				return d
			except UnicodeDecodeError as e:
				pass

		if len(data)==1:
			try:
				# Lone byte: fall back to its raw code point.
				data = unichr(ord(data))
			except Exception as e:
				pass

		self.logger.debug('can\'t deocde, return unchanged data! len=%d, begin 100 chars:%s', len(data), repr(data[:100]))
		return data


	def _signed(self):
		u'''Return True when a login session is believed to be active.'''
		return self._signed_in


	def _login(self, force=False):
		u'''Log in unless already signed in (or *force* is True).

		Returns the resulting signed-in status.
		'''
		if self._signed() and (not force):
			self.logger.debug('already signed in!')
			return self._signed()

		return self.login()


	def _getResponse(self, url, data=None, headers=None, **kwargs):
		u'''Fetch *url* (POST when *data* is given), retrying transient errors.

		Pass useproxy=True in kwargs to route through proxy_opener.
		Returns (body, final_url, code): code is the HTTP status or one of
		the SOCKET_*/errno pseudo codes; body and final_url stay None when
		every attempt failed before a response was read.
		'''
		res, rurl, code = None, None, 0
		# use proxy if kwargs['useproxy']=True
		if kwargs.get('useproxy',False):
			opener=self.proxy_opener
##			self.logger.debug('%s use proxy',url)
		else:
			opener=self.opener

		req = urllib2.Request(url, urllib.urlencode(data) if data else None, headers if headers else {})

		for i in range(self.socket_trytimes): # retry budget
			if i != 0:
				self.logger.info('retry %d/%d %s ... ', i+1, self.socket_trytimes, url)
			try:
				r = opener.open(req, timeout=self.socket_timeout)
				if not r:
					# myHTTPDefaultErrorHandler yields None for non-404 errors.
					res,rurl = '', ''
				else:
					res, rurl, code=r.read(), r.geturl(), r.getcode()

				# Sanity check: for uncompressed bodies the byte count must
				# match the Content-Length header.
				if r and r.headers.get('content-length', None):
					if r.headers.get('content-encoding') not in ('gzip', 'deflate'):
						assert int(r.headers.get('content-length', None))==len(res)

##				if not r:
##					self.logger.info('get code %d with r==None !!!!',code)
				break
			except urllib2.HTTPError as e:
				code = e.code
				if e.code in (404,301,302):
					# Not-found and redirects are final; no point retrying.
##					self.logger.info('no more try for: %s',e)
					break
				self.logger.info('HTTPError！ %s',e)
			except urllib2.URLError as e:
				if isinstance(e.reason,socket.gaierror):
					code=e.reason.errno
					if e.reason.errno ==socket.EAI_NONAME: # Name or service not known (host not found)
##						logger.info('no more try for URLError  (EAI_NONAME), %s',e)
						if not self.onURLError_NoName(url):
							break
					else:
						self.logger.info('URLError socket.gaierror! %s',e)
				elif isinstance(e.reason,socket.timeout): # timed out while establishing the connection?
##					logger.info('URLError (urlopen error timed out) | %s',imgurl.url)
					self.logger.info('connect timeout | %s',url)
					code=self.SOCKET_CONN_TIMED_OUT
##					break
				elif isinstance(e.reason,socket.error):
					code=e.reason.errno
					self.logger.info('connect error code=%d,msg=%s| %s',e.reason.errno,e.reason.strerror,url)
					if code in (errno.ECONNREFUSED,errno.ECONNRESET,errno.EHOSTUNREACH):
						if not self.onConnection_Refused_Reset_HostUnreach(url):
							break
				elif isinstance(e.reason,OSError):
					code=e.reason.errno
					if code in (errno.ENOENT,):
						break
				elif isinstance(e.reason,str):
					if e.reason.find('unknown url type')!=-1 or e.reason.find('no host given')!=-1:
						code=404
						break
				else:
					self.logger.info('%d URLError! %s, %s | %s',i,repr(e),e,url)
					break
			except socket.timeout as e: # timed out while receiving data?
				self.logger.info('receive data timeout(%d) ! %s',i,url)
				code=self.SOCKET_RECV_TIMED_OUT
			except socket.error as e:
				self.logger.info('%d socket.error! %s',i,e)
			except IOError as e:
				self.logger.info('%d IOError! %s | %s',i,e,url)
			except StandardError as e:
				self.logger.info('StandardError! %s | %s',e,url)
				cont,code=self.onException(url)
				if not cont:
					break
			except httplib.InvalidURL as e:
				# NOTE(review): unreachable -- httplib.InvalidURL subclasses
				# ValueError, itself a StandardError, so the clause above
				# always catches it first.
				self.logger.info('InvalidURL! %s | %s',e,url)
				cont,code=self.onException(url)
				if not cont:
					break


		return (res,rurl,code)


	def onURLError_NoName(self,url):
		u'''Hook invoked on socket.EAI_NONAME (host not found) for *url*.

		Return False to stop retrying, True to allow further retries
		(subject to the remaining retry budget).
		'''
		return False


	def onConnection_Refused_Reset_HostUnreach(self,url):
		u'''Hook invoked on ECONNREFUSED/ECONNRESET/EHOSTUNREACH for *url*.

		Return False to stop retrying, True to allow further retries
		(subject to the remaining retry budget).
		'''
		return False


	def _getData(self,url,data,title=''):
		'''Fetch data from *url*, transparently re-logging-in when needed.

		A response whose final URL differs from the requested one is taken
		as a redirect to a login page: log in again and retry (up to 3
		times).  Returns the decoded text, '' when no clean response was
		obtained, or None when login failed.
		'''
		ok=False
		for _ in xrange(3):
##			self.logger.debug('get date from url %s: %s...',title,url)
			rtndata,rurl,code=self._getResponse(url,data)
			if rurl!=url:
##				self.logger.debug('need login ? %s!= expected %s',rurl,url)
				if not self._login(True):
					self.logger.debug('can\'t login, do nothing')
					return
				continue # login ok, retry the url
			else:
				ok=True
				break

		if not ok:
			self.logger.debug('data not ok, do nothing')
			return ''

		return self._tryDecode(rtndata)


	def onException(self,url):
		u'''Hook invoked on an unexpected exception while fetching *url*.

		Returns (retry, code): retry=False stops further attempts; code is
		reported as the request's result code.
		'''
		return False,0


	def doWork(self,forum_name,sector_name,time_since,page_start,page_stop):
		u'''Main entry point: crawl pages [page_start, page_stop) of a sector.

		NOTE: getPostList() is not defined on this base class; subclasses
		are expected to provide it.
		'''
		self.logger.info('%s\n\n','~~~'*30)
		self.logger.info('forum=%s,sector=%s,time_since=%s,pagerange [%d,%d)',forum_name,sector_name,
										time_since,page_start,page_stop)
		self.getPostList(forum_name,sector_name,time_since,page_start,page_stop)


	def getPageRange(self,numperpage,old,new=0):
		'''Compute a page-number range as (first_page, last_page + 1).

		Assumes *numperpage* posts per page; *old* and *new* are reply
		counts excluding the topic post.  When new != 0, the range resumes
		from the page containing reply *old*.  For a first page that holds
		numperpage+1 posts (topic post included), subtract 1 from the reply
		count before passing it in.
		'''
		if new!=0: # resume from the page number where the previous run stopped
			return ( int(ceil((old+1)/float(numperpage))), int( ceil((new+1)/float(numperpage)) )+1 )
		else:
			return ( 1, int( ceil((old+1)/float(numperpage)) )+1 )


	def setExitFlag(self):
		u'''Ask the worker loop to exit by setting this instance's exit event.'''
		if self.exitevent and not self.exitevent.is_set():
			self.exitevent.set()


	def setSocket(self,timeout=None,trytimes=None):
		u'''Override the network timeout and/or retry count for this instance.'''
		if timeout:
			self.socket_timeout=timeout
		if trytimes:
			self.socket_trytimes=trytimes


	def setSignalHandler(self):
		u'''Install signalHandler for SIGUSR1/SIGTERM (Linux only) and expose
		this instance's exit event through the module-level EXITEVENT.'''
		global EXITEVENT
		EXITEVENT=self.exitevent
		if sys.platform=='linux2':
			# register the signal handlers
			signal.signal(signal.SIGUSR1,signalHandler)
			signal.signal(signal.SIGTERM,signalHandler)


	def __getstate__(self):
		u'''Pickle support: drop unpicklable members (logger, opener, cookie
		jar, exit event); __setstate__ rebuilds most of them.

		NOTE(review): proxy_opener is NOT removed here -- verify that it
		actually pickles, or drop it as well.
		'''
		self.logger.debug('get state called!')
		odict=self.__dict__.copy()
		del odict['logger']
		del odict['opener']
		del odict['cj']
		del odict['exitevent']
		return odict


	def __setstate__(self,d):
		u'''Rebuild the transient members removed by __getstate__.

		NOTE(review): exitevent is not recreated here although __getstate__
		deletes it -- confirm unpickled instances get a usable exit event.
		'''
		self.__dict__.update(d)
		self.cj = cookielib.LWPCookieJar()
		self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj),
			myHTTPDefaultErrorHandler,
			ContentEncodingProcessor)
		self.opener.addheaders=[('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3')]
		self.logger=logging.getLogger(self.__class__.__name__)
		self.logger.setLevel(logging.INFO)

	def login(self):
		u'''Subclasses must override this to perform the actual sign-in.'''
		raise StandardError('subclass should implement this !')
