#!/usr/bin/python
#coding=utf-8
"""
reference: http://doc.scrapy.org/topics/architecture.html
"""
import logging
import urlparse
import re
import sys
import datetime
from lxml import etree
import os
import time
import socket
import hashlib
from math import ceil
from cStringIO import StringIO
from PIL import GifImagePlugin, Image, ImageFilter, ImageEnhance
import _mysql_exceptions
##from captchaTool import CGetImg
from multiprocessing import Process, Queue, Pool, Lock, get_logger
from multiprocessing.managers import SyncManager
import errno
from Queue import Full, Empty
import json
import ConfigParser
import codecs
from optparse import OptionParser
from PostGetter import PostGetter
from PostGetter import htmlentitydecode, ContentEncodingProcessor, chkLogin
from pprint import pprint
import signal

# Root-logger setup for the whole spider: DEBUG level, terse
# "function message" lines with HH:MM:SS timestamps.
logging.basicConfig(
		level=logging.DEBUG,
		format='%(funcName)s %(message)s',
		datefmt='%H:%M:%S')

# Shared shutdown event; assigned by SpiderEngine.setSignalHandler() and
# set by signalHandler() when SIGTERM arrives.
EXITEVENT = None


def signalHandler(sig, stackframe):
	'''Handle process signals: log SIGUSR1, set the global exit event on SIGTERM.'''
	global EXITEVENT
	banner = '*' * 20
	if sig == signal.SIGUSR1:  # informational only, no action taken
		logging.info('\n\n%s received signal USR1 %s\n\n', banner, banner)
	elif sig == signal.SIGTERM:
		logging.info('\n\n%s received signal TERM, set exit flag ... %s\n\n', banner, banner)
		if EXITEVENT:
			EXITEVENT.set()
		else:
			logging.info('EXITEVENT is None!')


class UrlScheduler(object):
	'''Schedules crawl urls: a work queue of (title, url) pairs plus a
	shared history dict recording each url's progress state.'''

	# progress states stored in url_hist
	PG_WAIT, PG_PRO, PG_DONE = '1', '2', '3'

	def __init__(self, url_queue, url_hist, lck):
		super(UrlScheduler, self).__init__()
		self.url_queue = url_queue   # queue of (title, url) work items
		self.url_hist = url_hist     # shared dict: url -> progress state
		self.lck = lck               # guards the check-then-add in addUrl()
		self.strStat = {'1': 'PG_WAIT', '2': 'PG_PRO', '3': 'PG_DONE'}
		self.logger = logging.getLogger(self.__class__.__name__)

	def stat(self, stat):
		'''Return the human-readable name of a progress state code.'''
		return self.strStat.get(stat, '!!! unknown %s!!!' % stat)

	def addExitFlag(self):
		'''Enqueue a (None, None) sentinel so one consumer process exits.'''
		self.url_queue.put((None, None), False)

	def exit(self):
		'''Close the queue and wait for its feeder thread to finish.'''
		self.url_queue.close()
		self.url_queue.join_thread()

	def addUrl(self, title, url):
		'''Enqueue (title, url) unless the url was seen before.'''
		with self.lck:
			if url not in self.url_hist:
				self.url_hist[url] = UrlScheduler.PG_WAIT
				self.url_queue.put((title, url))

	def getUrl(self):
		'''Pop the next (title, url) pair and mark the url in-progress.'''
		title, url = self.url_queue.get()
		self.url_hist[url] = UrlScheduler.PG_PRO
		return title, url

	def updUrl(self, url):
		'''Mark the url as done.'''
		self.url_hist[url] = UrlScheduler.PG_DONE


class PageScheduler(object):
	'''Moves downloaded pages from downloader processes to parser
	processes through a shared queue.'''

	def __init__(self, url_scheduler, pg_queue, lck):
		super(PageScheduler, self).__init__()
		self.url_scheduler = url_scheduler  # used to mark urls done on put()
		self.page_queue = pg_queue          # queue of (url, data) pairs
		self.lck = lck                      # currently unused; kept for interface parity
		self.logger = logging.getLogger(self.__class__.__name__)

	def addExitFlag(self):
		'''Enqueue a (None, None) sentinel so one parser process exits.'''
		self.page_queue.put((None, None), False)

	def exit(self):
		'''Close the queue and wait for its feeder thread to finish.'''
		self.page_queue.close()
		self.page_queue.join_thread()

	def put(self, url, data):
		'''Mark url done in the url scheduler, then queue its page data.'''
		self.url_scheduler.updUrl(url)
		self.page_queue.put((url, data))

	def get(self):
		'''Return the next (url, data) pair.'''
		return self.page_queue.get()



class PageParser(object):
	'''Pool of parser worker processes.

	Workers pull (url, (page, final_url, code)) items off the page
	scheduler, extract candidate links (filtered by the host allow list
	and the url allow/skip patterns), push new urls back to the url
	scheduler, and run a simple body-text extraction heuristic.
	'''

	def __init__(self, url_scheduler, pg_scheduler, shutdown, mp_mngr, nr_process=3, mlog=None):
		'''
		@url_scheduler: UrlScheduler that receives newly discovered urls
		@pg_scheduler:  PageScheduler providing downloaded pages
		@shutdown:      manager Event used to stop the worker processes
		@mp_mngr:       multiprocessing SyncManager for shared state
		@nr_process:    number of parser processes to spawn
		@mlog:          optional multiprocessing-safe logger
		'''
		assert url_scheduler is not None
		assert pg_scheduler is not None
		assert mp_mngr is not None
		self.logger = logging.getLogger(self.__class__.__name__)

		self.url_scheduler = url_scheduler
		self.page_scheduler = pg_scheduler
		self.multiprocess_manager = mp_mngr
		self.nr_process = nr_process

		self.processes = []
		self.nms = None            # manager Namespace holding the shared parsed-page counter
		self.shutdown = shutdown
		self.mlog = mlog

		self.htmlparser = None     # per-process lxml HTMLParser, created in parseProcess()

		# strip the outermost <a>/<div> tag from a serialized element
		self.exclude_first_a_tag = re.compile(r'\A<a\s+.*?>(.*?)<\s*/\s*a\s*>\Z', re.M|re.S|re.I|re.L|re.U)
		self.exclude_first_div_tag = re.compile(r'\A<div.*?>(.*?)</div>\Z', re.M|re.S|re.I|re.L|re.U)
		# manager-backed lists so every worker process sees the same filters
		self.allowHost = self.multiprocess_manager.list()
		self.blockHost = self.multiprocess_manager.list()
		self.allowPattern = self.multiprocess_manager.list()
		self.pAllowPattern = []    # compiled allowPattern regexes (per process)
		self.skipPattern = self.multiprocess_manager.list()
		self.pSkipPattern = []     # compiled skipPattern regexes (per process)

		# state of the body-text scoring heuristic (see getBestBodyText)
		self.besttag = None
		self.bestvalue = 0
		self.magic = 1.0           # weight of text length relative to punctuation count


	def loadCfg(self, inifile='testSpider.ini', inifile_encoding='utf-8'):
		'''load info from ini file specified by @inifile
		bad_domain,max id of reply processed last time, etc.
		Not implemented yet for the parser.
		'''
		pass


	def saveCfg(self, inifile='testSpider.ini', inifile_encoding='utf-8'):
		'''save info to ini file specified by @inifile
		bad_domain,max id of reply processed last time, etc.
		Not implemented yet for the parser.
		'''
		pass


	def addHost(self, allow, block):
		'''Add host names to the allow/block lists.

		@allow/@block: a str/unicode host name or an iterable of them;
		values are lowercased, converted to unicode and de-duplicated.
		'''
		info, debug = self.logger.info, self.logger.debug

		if allow:
			if any((isinstance(allow, x) for x in (list, tuple, set))):
				for x in allow:
					x = x.lower()
					if isinstance(x, str):
						x = unicode(x)
					if x not in self.allowHost:
						self.allowHost.append(x)
			elif any((isinstance(allow, x) for x in (str, unicode))):
				allow = unicode(allow.lower())
				if allow not in self.allowHost:
					self.allowHost.append(allow)
			else:
				info('unknown type(%s), do nothing!', type(allow).__name__)

			info('now %d items in host allow list', len(self.allowHost))
			pprint(self.allowHost)

		if block:
			if any((isinstance(block, x) for x in (list, tuple, set))):
				for x in block:
					x = x.lower()
					if isinstance(x, str):
						x = unicode(x)
					info('checking %s ...', x)
					if x not in self.blockHost:
						self.blockHost.append(x)
			elif any((isinstance(block, x) for x in (str, unicode))):
				block = unicode(block.lower())
				info('checking %s ...', block)
				if block not in self.blockHost:
					self.blockHost.append(block)
			else:
				info('unknown type(%s), do nothing!', type(block).__name__)

			info('now %d items in host block list', len(self.blockHost))
			pprint(self.blockHost)


	def addPattern(self, allow, skip):
		'''Add url regex patterns to the allow/skip lists and recompile
		the per-process compiled pattern caches.

		@allow/@skip: a str/unicode regex or an iterable of them.
		'''
		info, debug = self.logger.info, self.logger.debug
		if allow:
			if any((isinstance(allow, x) for x in (list, tuple, set))):
				for x in allow:
					if isinstance(x, str):
						x = unicode(x)
					if x not in self.allowPattern:
						self.allowPattern.append(x)
			elif any((isinstance(allow, x) for x in (str, unicode))):
				allow = unicode(allow)
				if allow not in self.allowPattern:
					self.allowPattern.append(allow)
			else:
				info('unknown type for allow(%s), do nothing!', type(allow).__name__)

			self.pAllowPattern = [ re.compile(x, re.S|re.I|re.U|re.L) for x in self.allowPattern ]
			info('allow pattern: %d', len(self.pAllowPattern))

		if skip:
			if any((isinstance(skip, x) for x in (list, tuple, set))):
				for x in skip:
					if isinstance(x, str):
						x = unicode(x)
					if x not in self.skipPattern:
						self.skipPattern.append(x)
			elif any((isinstance(skip, x) for x in (str, unicode))):
				skip = unicode(skip)
				if skip not in self.skipPattern:
					self.skipPattern.append(skip)
			else:
				info('unknown type for skip(%s), do nothing!', type(skip).__name__)

			self.pSkipPattern = [ re.compile(x, re.S|re.I|re.U|re.L) for x in self.skipPattern ]
			info('skip pattern: %d', len(self.pSkipPattern))


	def createProcesses(self):
		'''Spawn self.nr_process daemon parser processes (no-op when 0).'''
		info, debug = self.logger.info, self.logger.debug

		if self.nr_process:
			info('creating parser processes (%d)...', self.nr_process)
			self.lck4mcnt = self.multiprocess_manager.Lock()
			# logger usable from multiple processes
			if not self.mlog:
				self.mlog = get_logger()
				mhandler = logging.StreamHandler()
				mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s %(message)s', '%H:%M:%S'))
				self.mlog.addHandler(mhandler)
				self.mlog.setLevel(logging.INFO)
			# namespace for the shared parsed-page counter
			self.nms = self.multiprocess_manager.Namespace()
			self.nms.mcnt = 0

			for i in range(self.nr_process):
				proc = Process(target=self.parseProcess, name='parser-%d'%i,
				               args=(self.url_scheduler, self.page_scheduler, self.shutdown, 'parser-%d'%i))
				self.processes.append(proc)
				proc.daemon = True
				proc.start()
				info('%s pid %d', proc.name, proc.pid)
		else:
			info('nr_process=%d, no parser processes create.', self.nr_process)


	def getStat(self):
		'''Log the total number of pages parsed by all workers.'''
		info, debug = self.logger.info, self.logger.debug
		with self.lck4mcnt:
			info('\ntotal parse %d pages.\n', self.nms.mcnt)


	def closeProcesses(self):
		'''Stop all parser processes and log the final parse counter.'''
		info, debug = self.logger.info, self.logger.debug

		if len(self.processes) == 0:
			info('no parser processes!')
		else:
			info('closing parser processes ...')
			# one exit sentinel per worker so each get() can return
			for _ in range(len(self.processes)):
				self.page_scheduler.addExitFlag()
			# NOTE(review): terminate() is called before join(), so workers
			# are killed rather than draining the sentinels queued above --
			# confirm whether a graceful join was intended here.
			for p in self.processes:
				p.terminate()
				p.join()

			with self.lck4mcnt:
				info('All parser processes closed. Parsed %d', self.nms.mcnt)


	def getBestBodyText(self, tagNode, tag):
		'''Recursively score child <tag> elements of @tagNode as body-text
		candidates and track the best one in self.besttag/self.bestvalue.

		The score favors elements with many CJK sentence delimiters and a
		high text-to-link ratio.
		'''
		info, debug = self.logger.info, self.logger.debug

		taglist = tagNode.xpath('./%s'%tag)
		if not taglist:
			return
		for t in taglist:
			tmp = t.xpath('./text()')
			alltext = '\n'.join( ( x.strip() for x in tmp) )
			nr_link = len(t.xpath('./a'))
			# NOTE(review): the delimiters are utf-8 byte strings while
			# alltext is likely unicode from lxml -- counts may be 0; verify.
			nr_period = alltext.count('，') + alltext.count('。')
			v_tag = nr_period + self.magic*len(alltext) / (nr_link + 1)
			if (self.besttag is None) or (self.bestvalue < v_tag):
				self.besttag = t
				self.bestvalue = v_tag
			self.getBestBodyText(t, tag)


	def parse(self, url, data):
		'''Parse one downloaded page.

		@url:  the url that was requested
		@data: (page_bytes, final_url, http_code) from the downloader
		Returns a set of (anchor_text, absolute_url) candidate links, or
		None when the (redirected) page is filtered out.
		'''
		info, debug = self.logger.info, self.logger.debug
		pg, rurl, code = data
		rslt = None
		if url != rurl:
			# redirected: record the final url as done too, then re-apply the
			# host/pattern filters to it (the requested url was filtered
			# before download; the redirect target was not)
			self.url_scheduler.updUrl(rurl)

			sr = urlparse.urlsplit(rurl)
			rurl = unicode(rurl)
			if unicode(sr.netloc) in self.allowHost:
				# keep when an allow pattern matches, or no skip pattern matches
				if any( (x.search(rurl) for x in self.pAllowPattern) ) or (not any( (x.search(rurl) for x in self.pSkipPattern) ) ):
					pass
				else:
					return rslt
			else:
				return rslt

		tree = etree.fromstring(pg, self.htmlparser)

		# determine page charset from <meta http-equiv="Content-Type"> or <meta lang>
		charset = 'utf-8'
		element = tree.xpath('/html/head/meta[@http-equiv="Content-Type"]')
		if element:
			charset = element[0].xpath('@content')[0]
			m = re.search(r'(?iLmsu).*\s*charset=([a-z0-9|-]+)\s*', charset)
			try:
				assert m is not None
			except AssertionError:
				info('no charset found in Content-Type: %s', etree.tostring(element[0]))
			else:
				charset = m.group(1)
		elif tree.xpath('/html/head/meta[@lang]'):
			charset = tree.xpath('/html/head/meta[@lang]/@lang')[0]

		# get title; on decode failure retry parsing with common CJK encodings
		element = tree.xpath('/html/head/title')
		if element:
			try:
				title = element[0].xpath('./text()')[0]
			except UnicodeDecodeError as e:
				title = None  # ensure bound even when every fallback fails
				for x in ('utf-8', 'gb18030', 'gbk', 'gb2312', 'big5'):
					if charset != x:
						# BUGFIX: was hard-coded encoding='gb18030', which made
						# this loop retry the same encoding five times
						tmptree = etree.fromstring(pg, etree.HTMLParser(encoding=x))
						try:
							title = tmptree.xpath('/html/head/title/text()')[0]
							break
						except UnicodeDecodeError:
							pass
				if not title:
					info('charset=%s, UnicodeDecodeError! %s', charset, e)
					# NOTE(review): hard-coded debug dump path and full engine
					# shutdown on a single bad page -- confirm intent
					open('/home/kevin/tmp.html', 'w').write(pg)
					self.shutdown.set()
					return rslt
				else:
					info('%s|%s get title using %s instead of %s!', title, rurl, x, charset)
		else:
			title = None

		al = tree.xpath('//a[@href]')

		# determine page flavor: xhtml pages tend to lay out with <div>,
		# old html with <table>
		isXhtml = False
		xmlns = tree.attrib.get('xmlns', None)
		if xmlns and xmlns.find('xhtml') != -1:
			isXhtml = True
		# NOTE(review): keywords/desc/robot_opts are collected but not used yet
		element = tree.xpath('/html/head/meta[@name="keywords"]')
		if element:
			keywords = [ x.strip() for x in element[0].xpath('@content')[0].split(',') ]
		element = tree.xpath('/html/head/meta[@name="description"]')
		if element:
			desc = element[0].xpath('@content')[0]
		# robots meta content values:
		#   all:      page may be indexed and its links followed
		#   none:     neither indexed nor followed ("noindex, nofollow")
		#   index:    page may be indexed
		#   follow:   links on the page may be followed
		#   noindex:  not indexed, but links may be followed
		#   nofollow: indexed, but links are not followed
		element = tree.xpath('/html/head/meta[@name="robots"]')
		if element:
			robot_opts = [x.strip() for x in element[0].xpath('@content')[0].split(',')]
		nr_a = len(al)
		textlist = tree.xpath('//*[local-name()!="script" and local-name()!="style" and local-name()!="a" and local-name()!="input"]//text()')
		nr_text = len('\n'.join(( x.strip() for x in textlist )))
		# link density of the whole page; guard against text-free pages
		v_page = float(nr_a)/nr_text if nr_text else 0.0
		# try to find body text
		if isXhtml:
			tag = 'div'
		else:
			if len(tree.xpath('/html/body//table')) > len(tree.xpath('/html/body//div')):
				tag = 'table'
			else:
				tag = 'div'
		body = tree.xpath('/html/body')
		if not body:
			# frameset or malformed page: nothing to extract
			return rslt
		elem = body[0]
		info('\n\n%s', '--'*30)
		self.besttag, self.bestvalue = None, 0
		self.getBestBodyText(elem, tag)
		if self.besttag is not None:
			# strip the wrapping tag; the regex only knows <div>, so a best
			# <table> node simply yields no body text instead of crashing
			m = self.exclude_first_div_tag.match( htmlentitydecode(etree.tostring(self.besttag)).strip() )
			if m:
				bodytext = m.group(1)
				nr_bodytext = len(bodytext)
				info('%-5d|%s|%s|%.3f|%s|%s\n%s\n\n……………………\n%s\n%s', nr_bodytext, title, isXhtml, v_page, tag, rurl, bodytext[: int(0.2*nr_bodytext)], bodytext[int(nr_bodytext*0.8):], '-='*30)

		# collect candidate links from every <a href=...>
		rslt = set()
		for item in al:
			a = item.xpath('@href')[0]
			try:
				atext = item.xpath('text()')[0]
			except IndexError:
				# no direct text: fall back to img/@alt, then to the raw
				# serialized contents of the anchor
				if item.xpath('./img[@alt]'):
					atext = item.xpath('./img[@alt]/@alt')[0]
				else:
					try:
						atext = self.exclude_first_a_tag.match( htmlentitydecode(etree.tostring(item)).strip() ).group(1)
					except AttributeError:
						el = item.xpath('*')
						atext = ''.join((htmlentitydecode(etree.tostring(i)) for i in el))

			atext = atext.strip()

			scheme, netloc = None, None
			sr = urlparse.urlsplit(a)
			scheme, netloc = sr.scheme, sr.netloc
			if not scheme:
				scheme = 'http'
			if scheme in ('javascript', 'mailto', 'ftp', 'file', 'hthttp'):
				continue
			try:
				assert scheme in ('http', 'https')
			except AssertionError:
				info('scheme %s unknown!', scheme)
			if not netloc:
				# relative link: resolve against <base href> or the page url
				element = tree.xpath('/html/head/base/@href')
				if element:
					baseurl = element[0]
					netloc = baseurl
				else:
					bsr = urlparse.urlsplit(rurl)
					if bsr.netloc:
						netloc = bsr.netloc
					else:
						raise StandardError('no netloc found!')

			# drop the fragment so identical pages are not re-crawled
			fullurl = urlparse.urlunsplit( (scheme, netloc, sr.path, sr.query, '') )

			fullurl = unicode(fullurl)
			# keep when an allow pattern matches, or no skip pattern matches
			if unicode(netloc) in self.allowHost:
				if any( (x.search(fullurl) for x in self.pAllowPattern) ) or (not any( (x.search(fullurl) for x in self.pSkipPattern) ) ):
					rslt.add((atext, fullurl))

		return rslt


	def parseProcess(self, url_queue, page_queue, shutdown, proc_name):
		'''Worker-process main loop: take (url, data) pairs off the page
		scheduler, parse them, and feed discovered links back to the url
		scheduler. Exits on the (None, None) sentinel or when @shutdown
		is set.
		'''
		info, debug = self.logger.info, self.logger.debug
		get = page_queue.get
		put = url_queue.addUrl
		self.logger = self.mlog  # switch to the multiprocessing-safe logger
		info('%s process started ~', proc_name)
		parse = self.parse
		self.htmlparser = etree.HTMLParser()  # lxml parsers are not picklable; build per process
		cnt = 0

		while True:
			url, data = get()
			if shutdown.is_set():
				break

			if not url and not data:
				break  # exit sentinel
			if data and (not data[0]):
				info('no actual data return from %s', url)
				continue

			urls = parse(url, data)
			if urls:
				for t, i in urls:
					put(t, i)

			cnt += 1
			if self.nms and self.lck4mcnt:
				with self.lck4mcnt:
					self.nms.mcnt += 1

			if shutdown.is_set():
				break

		info('%s process exited. parsed %d', proc_name, cnt)


class PageDownloader(PostGetter):
	'''Pool of downloader worker processes.

	Workers pull (title, url) items off the url scheduler, fetch the page
	via the inherited PostGetter HTTP machinery, and hand (url, data) to
	the page scheduler for parsing.
	'''

	def __init__(self, cookie_file, url_scheduler, pg_scheduler, shutdown, mp_mngr, nr_process=3, mlog=None):
		'''
		@cookie_file:  cookie jar file passed to PostGetter
		@url_scheduler: UrlScheduler providing urls to fetch
		@pg_scheduler:  PageScheduler receiving downloaded pages
		@shutdown:      manager Event used to stop the worker processes
		@mp_mngr:       multiprocessing SyncManager for shared state
		@nr_process:    number of downloader processes to spawn
		@mlog:          optional multiprocessing-safe logger
		'''
		super(PageDownloader, self).__init__(cookie_file)
		assert url_scheduler is not None
		assert pg_scheduler is not None
		assert mp_mngr is not None

		self.url_scheduler = url_scheduler
		self.page_scheduler = pg_scheduler
		self.multiprocess_manager = mp_mngr
		self.nr_process = nr_process

		self.processes = []
		self.bad_domain = []       # domains known to fail; persisted via save/loadCfg
		self.use_proxy = []        # domain patterns that need a proxy; persisted
		self.pDomainUseProxy = []  # compiled use_proxy regexes (was initialized twice)
		self.nms = None            # manager Namespace holding the shared download counter
		self.shutdown = shutdown
		self.mlog = mlog


	def loadCfg(self, inifile='testSpider.ini', inifile_encoding='utf-8'):
		'''Load the bad_domain / use_proxy lists from @inifile, creating the
		file with empty defaults when missing, and compile the use_proxy
		patterns.
		'''
		curdir = os.path.abspath('.')
		if not os.path.isabs(inifile):
			inifile = os.path.join(curdir, inifile)
		cfg = ConfigParser.SafeConfigParser()
		if not os.access(inifile, os.F_OK):
			# first run: write a skeleton config so the reads below succeed
			with codecs.open(inifile, 'w', inifile_encoding) as fp:
				fp.write('[PageGetter]\nbad_domain=[]\nuse_proxy=[]\n')

		with codecs.open(inifile, 'r', inifile_encoding) as fp:
			cfg.readfp(fp)
		self.bad_domain = json.JSONDecoder().decode(cfg.get('PageGetter', 'bad_domain'))
		self.use_proxy = json.JSONDecoder().decode(cfg.get('PageGetter', 'use_proxy'))
		self.pDomainUseProxy = [ re.compile(x, re.S|re.I) for x in self.use_proxy ]


	def saveCfg(self, inifile='testSpider.ini', inifile_encoding='utf-8'):
		'''Write the (de-duplicated, sorted) bad_domain / use_proxy lists
		back to @inifile, preserving its other contents.
		'''
		curdir = os.path.abspath('.')
		if not os.path.isabs(inifile):
			inifile = os.path.join(curdir, inifile)
		cfg = ConfigParser.SafeConfigParser()
		with codecs.open(inifile, 'r', inifile_encoding) as fp:
			cfg.readfp(fp)
		cfg.set('PageGetter', 'bad_domain', json.JSONEncoder(ensure_ascii=False, separators=(',', ':')).encode(self.bad_domain).replace(',"', ',\n"'))
		self.use_proxy = list(set(self.use_proxy))
		# sort by reversed dotted components so related domains group together
		self.use_proxy.sort(key=lambda x: '.'.join(reversed(x.split('.'))) )
		cfg.set('PageGetter', 'use_proxy', json.JSONEncoder(ensure_ascii=False, separators=(',', ':')).encode(self.use_proxy).replace(',"', ',\n"'))
		with codecs.open(inifile, 'w', inifile_encoding) as fp:
			cfg.write(fp)


	def createProcesses(self):
		'''Spawn self.nr_process daemon downloader processes (no-op when 0).'''
		info, debug = self.logger.info, self.logger.debug

		if self.nr_process:
			info('creating downloader processes (%d)...', self.nr_process)
			self.setSocket(20, 2)
			# promote the plain lists to manager lists so workers share them
			self.bad_domain = self.multiprocess_manager.list(self.bad_domain)
			self.use_proxy = self.multiprocess_manager.list(self.use_proxy)
			self.lck4baddomain = self.multiprocess_manager.Lock()
			self.lck4useproxy = self.multiprocess_manager.Lock()
			self.lck4mcnt = self.multiprocess_manager.Lock()
			# logger usable from multiple processes
			if not self.mlog:
				self.mlog = get_logger()
				mhandler = logging.StreamHandler()
				mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s %(message)s', '%H:%M:%S'))
				self.mlog.addHandler(mhandler)
				self.mlog.setLevel(logging.INFO)
			# namespace for the shared downloaded-url counter
			self.nms = self.multiprocess_manager.Namespace()
			self.nms.mcnt = 0

			for i in range(self.nr_process):
				proc = Process(target=self.downloadProcess, name='downloader-%d'%i,
				               args=(self.url_scheduler, self.page_scheduler, self.shutdown, 'downloader-%d'%i))
				self.processes.append(proc)
				proc.daemon = True
				proc.start()
				info('%s pid %d', proc.name, proc.pid)
		else:
			info('nr_process=%d, no downloader processes create.', self.nr_process)


	def getStat(self):
		'''Log the total number of urls downloaded by all workers.'''
		info, debug = self.logger.info, self.logger.debug
		with self.lck4mcnt:
			info('\ntotal download %d urls.\n', self.nms.mcnt)


	def closeProcesses(self):
		'''Stop all downloader processes and log the final download counter.'''
		info, debug = self.logger.info, self.logger.debug

		if len(self.processes) == 0:
			info('no downloader processes!')
		else:
			info('closing downloader processes ...')
			# one exit sentinel per worker so each get() can return
			for _ in range(len(self.processes)):
				self.url_scheduler.addExitFlag()
			# NOTE(review): terminate() is called before join(), so workers
			# are killed rather than draining the sentinels queued above --
			# confirm whether a graceful join was intended here.
			for p in self.processes:
				p.terminate()
				p.join()

			with self.lck4mcnt:
				info('All downloader processes closed. Downloaded %d', self.nms.mcnt)

	def downloadProcess(self, url_queue, page_queue, shutdown, proc_name):
		'''Worker-process main loop: take (title, url) pairs off the url
		scheduler, download them via the inherited _getResponse(), and hand
		the result to the page scheduler. Exits on the (None, None)
		sentinel or when @shutdown is set.
		'''
		info, debug = self.logger.info, self.logger.debug
		get = url_queue.getUrl
		put = page_queue.put
		self.logger = self.mlog  # switch to the multiprocessing-safe logger
		info('%s process started ~', proc_name)
		getit = self._getResponse
		cnt = 0

		while True:
			title, url = get()
			if shutdown.is_set():
				break

			if not url and not title:
				break  # exit sentinel
			data = getit(url)
			put(url, data)

			cnt += 1
			if self.nms and self.lck4mcnt:
				with self.lck4mcnt:
					self.nms.mcnt += 1

			if shutdown.is_set():
				break

		info('%s process exited. downloaded %d', proc_name, cnt)


class SpiderEngine(object):
	'''Wires the url/page schedulers, downloader pool and parser pool
	together and drives the crawl until a page limit, shutdown event or
	KeyboardInterrupt ends it.'''

	def __init__(self, cookie_file, url_queue_size, pg_queue_size, nr_downloadprocess, nr_parserprocess):
		# cookie_file: cookie jar handed to PageDownloader/PostGetter
		# *_queue_size: 0 means an unbounded multiprocessing Queue
		super(SpiderEngine, self).__init__()

		self.logger = logging.getLogger(self.__class__.__name__)

		self.multiprocess_manager = SyncManager()#SyncManager(('',58585))
		self.multiprocess_manager.start()

		self.lck4urlq=self.multiprocess_manager.Lock()
		self.lck4pageq=self.multiprocess_manager.Lock()
		# event used to tell the worker subprocesses to exit
		self.shutdown=self.multiprocess_manager.Event()

		self.url_queue=Queue(url_queue_size)
		self.page_queue=Queue(pg_queue_size)
		self.url_hist=self.multiprocess_manager.dict()
		self.urls= UrlScheduler(self.url_queue, self.url_hist, self.lck4urlq)

		# multiprocessing-safe logger shared by both worker pools
		self.mlog=get_logger()
		mhandler=logging.StreamHandler()
		mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s() | %(message)s', '%H:%M:%S'))
		self.mlog.addHandler(mhandler)
		self.mlog.setLevel(logging.INFO)

		self.pages= PageScheduler(self.urls, self.page_queue, self.lck4pageq)
		self.downloader= PageDownloader(cookie_file, self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_downloadprocess, self.mlog)
		self.parser=PageParser(self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_parserprocess, self.mlog)


	def setSignalHandler(self):
		'''Expose self.shutdown through the module-global EXITEVENT and
		install the SIGTERM handler (Linux only).'''
		global EXITEVENT
		EXITEVENT=self.shutdown
		# NOTE(review): 'linux2' is the Python 2 value of sys.platform;
		# under Python 3 it is 'linux' -- revisit when porting.
		if sys.platform=='linux2':
			# register the signal handler
##			signal.signal(signal.SIGUSR1,signalHandler)
			signal.signal(signal.SIGTERM,signalHandler)


	def doWork(self, initurl, allowHost, blockHost, allowPattern, skipPattern, nr_limit):
		'''Run the crawl: configure filters, start both worker pools, seed
		@initurl, then poll the download counter until @nr_limit pages are
		fetched, the shutdown event fires, or the user interrupts.'''
		info, debug=self.logger.info, self.logger.debug
		self.parser.addHost(allowHost, blockHost)
		self.parser.addPattern(allowPattern, skipPattern)
		self.downloader.createProcesses()
		self.parser.createProcesses()
		info('wait 2 secs ...')
		time.sleep(2)

		self.setSignalHandler()
		# seed the crawl with the initial url
		self.urls.addUrl('test', initurl)
		old= 0 # self.downloader.nms.mcnt
		try:
			while True:
				# wait(2) doubles as the polling interval
				if self.shutdown.wait(2):
					info('shutdown event got.')
					break

				if self.downloader.nms.mcnt != old:
					old=self.downloader.nms.mcnt
					info('\n%s\n\tdownloader mcnt: %d\n%s', '-*'*30, old, '-*'*30)
					if old>nr_limit:
						info('exceed nr_limit %d>%d, break', self.downloader.nms.mcnt, nr_limit)
						break

		except KeyboardInterrupt:
			info('got KeyboardInterrupt')
		finally:
			debug('\n%s', '~'*30)
			self.downloader.getStat()
			self.parser.getStat()
			debug('\n%s', '~'*30)
			self.exit()
			debug('\n%s', '*~'*30)


	def exit(self):
		'''Shut everything down: set the shutdown event, drain both queues
		so blocked workers can proceed, close the worker pools, then stop
		the manager process.'''
		info, debug=self.logger.info, self.logger.debug
		self.shutdown.set()
		# drain page_queue so parser workers blocked on put()/get() unblock
		try:
			while True:
				self.page_queue.get_nowait()
		except Empty:
			pass

		# drain url_queue so downloader workers blocked on put()/get() unblock
		try:
			while True:
				self.url_queue.get_nowait()
		except Empty:
			pass

		self.parser.closeProcesses()

		# drain again: the parser shutdown may have queued more urls
		try:
			while True:
				self.url_queue.get_nowait()
		except Empty:
			pass
		self.downloader.closeProcesses()
		info('shutting down multiprocess_manager ...')

		if self.multiprocess_manager:
			self.multiprocess_manager.shutdown()
			self.multiprocess_manager=None
		info('multiprocess_manager shut down.')


##os.environ['DJANGO_SETTINGS_MODULE']='postgetter.settings'
##sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# 重要 为了能使用django的orm模块，需要创建目录btView，在里面放上空
#  文件__init__.py和文件models.py,这样django就会去数据库中找btView_Video,
#  btView_Screenshot，btView是创建的另一个ajango应用的app_name
# 如何单独使用ajango的orm模块见 http://wiki.woodpecker.org.cn/moin/UsingDjangoAsAnStandaloneORM
##from postgetter.getpost.models import Forum,Sector,Post,Reply,ImgUrl,ImgFile
##from django.db.models import Max,Min,Count,Avg
##from django.db import transaction
if __name__ == '__main__':
	# Python 2 only: force utf-8 as the default string encoding
	reload(sys)
	sys.setdefaultencoding('utf-8')

	# django will set env['TZ'] according to the settings.py of django project
	if os.environ.get('TZ', None):
		logging.info('Time Zone: %s',os.environ['TZ']) #	os.environ['TZ']='Asia/Shanghai'
	else:
		logging.info('no "TZ" environ var found, can\'t confirm Time Zone.') #	os.environ['TZ']='Asia/Shanghai'


	# engine with 2 downloader + 2 parser processes, unbounded queues
##	m=SpiderEngine('gen_cookie.dat', 0, 0, 0, 0)
	m=SpiderEngine('gen_cookie.dat', 0, 0, 2, 2)

	# -- alternative per-site crawl configurations kept for reference --
##	host=u'http://www.jiayuan.com'
##	allowHost='www.jiayuan.com'
##	blockHost=None
##	allowPattern=None
##	skipPattern=[
##			'.*?/profile.jiayuan.com/.*?', 
##			'http://www.jiayuan.com/news/.*?',
##			'http://www.jiayuan.com/vip/.*?',
##			'http://www.jiayuan.com/newmember/*.?',
##			'http://login.jiayuan.com/.*?',
##			'http://www.jiayuan.com/login/.*?',
##			'http://www.jiayuan.com/master/.*?',
##			'http://www.jiayuan.com/helpcenter/.*?',
##			'http://www.jiayuan.com/register.*?',
##			'http://www.jiayuan.com/brightlist_new.php.*?',
##			'http://www.jiayuan.com/paper/select.php.*?',
##			'http://www.jiayuan.com/hot.php.*?',
##			'http://www.jiayuan.com/usercp/.*?'
##			]

##	host=u'http://www.shoudian.com'
##	allowHost='www.shoudian.com'
##	blockHost=None
##	allowPattern=None
##	skipPattern=[
##			'http://www.shoudian.com/space.php\?.*?',
##			'http://www.shoudian.com/redirect.php\?.*?',
##			'http://www.shoudian.com/pm.php\?.*?',
##			'http://www.shoudian.com/post.php\?.*?',
##			'http://www.shoudian.com/space-uid-\d+\.html',
##			'http://www.shoudian.com/space-username-.*?\.html',
##			'http://www.shoudian.com/my.php\?.*?',
##			'http://www.shoudian.com/logging.php\?.*?',
##			'http://www.shoudian.com/viewthread.php\?.*?',
##			'http://www.shoudian.com/misc.php\?.*?',
##			'http://www.shoudian.com/member.php\?.*?'
##			]

##	host=u'http://www.cnbeta.com/'
##	allowHost='www.cnbeta.com'
##	blockHost=None
##	allowPattern=None
##	skipPattern=[
##			'http://www.cnbeta.com/articles/\d+\.htm\?.+',
##			'http://www.cnbeta.com/backend.php.*',
##			'http://www.cnbeta.com/topics/.*',
##			'http://www.cnbeta.com/commentrss.php.*',
##			'http://www.cnbeta.com/rating.php?.*',
##			'http://www.cnbeta.com/newsend.php.*',
##			'http://www.cnbeta.com/joinus.php.*',
##			'http://www.cnbeta.com/argues/.*',
##			'http://www.cnbeta.com/zt/.*',
##			'http://www.cnbeta.com/article.php\?.+',
##			'http://www.cnbeta.com/topic.php\?.+'
##			]

##	host=u'http://www.v2ex.com/'
##	allowHost='www.v2ex.com'
##	blockHost=None
##	allowPattern=None
##	skipPattern=[
##			]

##	host=u'http://www.qq.com/'
##	allowHost=[
##			'news.qq.com',
##			'finance.qq.com',
##			'tech.qq.com',
##			'blog.qq.com',
##			'views.news.qq.com'
##			]
##	blockHost=None
##	allowPattern=None
##	skipPattern=[
##			]
##
	# active configuration: crawl a few solidot.org sections, skipping
	# user/admin/utility pages
	host=u'http://solidot.org/'
	allowHost=[
			'internet.solidot.org',
			'it.solidot.org',
			'linux.solidot.org',
			'opensource.solidot.org',
##			'books.solidot.org',
##			'developers.solidot.org',
##			'apple.solidot.org',
##			'games.solidot.org',
##			'hardware.solidot.org',
##			'software.solidot.org',
##			'interviews.solidot.org',
##			'ask.solidot.org',
##			'science.solidot.org',
##			'society.solidot.org',
##			'idle.solidot.org',
			]
	blockHost=None
	allowPattern=None
	skipPattern=[
			'http://solidot.org/~.+',
			'http://.*?\.solidot.org/comments.pl[\?.*]?',
			'http://.*?\.solidot.org/zoo.pl[\?.*]?',
			'http://.*?\.solidot.org/submit.pl[\?.*]?',
			'http://.*?\.solidot.org/search.pl[\?.*]?',
			'http://.*?\.solidot.org/pollBooth.pl[\?.*]?',
			'http://.*?\.solidot.org/submit.pl[\?.*]?',
			'http://.*?\.solidot.org/print.pl[\?.*]?',
			'http://.*?\.solidot.org/email.pl[\?.*]?',
			'http://.*?\.solidot.org/authors.pl[\?.*]?',
			'http://.*?\.solidot.org/journal.pl[\?.*]?',
			'http://.*?\.solidot.org/bookmark.pl[\?.*]?',
			'http://.*?\.solidot.org/messages.pl[\?.*]?',
			'http://.*?\.solidot.org/users.pl[\?.*]?',
			'http://.*?\.solidot.org/index.pl[\?.*]?',
			'http://.*?\.solidot.org/article.pl[\?.*]?', # most
			'http://.*?\.solidot.org/my/.*',
			'http://.*?\.solidot.org/faq/.*',
			'http://.*?\.solidot.org/about\.s?html',
			'http://.*?\.solidot.org/privacy\.s?html',
			'http://.*?\.solidot.org/terms\.s?html',
			'http://.*?\.solidot.org/contact\.s?html',
			]
	# crawl at most 5 pages (nr_limit)
	m.doWork(host, allowHost, blockHost, allowPattern, skipPattern, 5)

	logging.debug('done')
##	raw_input('press enter to exit ...')
