#!/usr/bin/python
#coding=utf-8
"""
抓取 www.sbanzu.com 的论坛帖子
"""
import logging
import urlparse
import re
import sys
import datetime
from lxml import etree
import os
import time
import socket
import hashlib
from math import ceil
from cStringIO import StringIO
from PIL import GifImagePlugin, Image, ImageFilter, ImageEnhance
from PostGetter import PostGetter, htmlentitydecode, chkLogin
import _mysql_exceptions
from captchaTool import CGetImg

class SbzPostGetter(PostGetter):
	u'''Post scraper for the www.sbanzu.com forum.'''
	CODE_IMAGE_FILE_PATH='/home/kevin/tmpcode.png' # path where the captcha image is saved
	NR_REPLY_PER_PAGE=10 # number of replies shown per topic page

	def __init__(self, cookie_file):
		PostGetter.__init__(self, cookie_file)
		# running counters reported by printStat()
		self.stat_post_add=0
		self.stat_reply_add=0

	def login(self):
		'''Log in to the forum.

		Fetches the captcha image, solves it via CGetImg, then posts the
		credentials plus captcha code to the forum's login url.  On success the
		cookie jar is saved to self.cookie_file and self._signed_in is set.

		Returns True when logged in, False otherwise.
		NOTE(review): relies on self.forum_name having been set by getPostList
		before the @chkLogin decorator triggers this -- confirm call order in
		PostGetter.
		'''
		self.logger.debug('to login ...')
		data={}
		forum=Forum.objects.filter(name=self.forum_name).get()
		if forum:
			data['UserName']=forum.usr.encode(self.dft_html_encoding)
			data['Password']=forum.pwd.encode(self.dft_html_encoding)
			data['save']='checked'
			r,_,_=self._getResponse(urlparse.urljoin(forum.home_url,'sbanzu/GetCode.asp').encode(self.dft_img_encoding),data)
			if not r:
				self.logger.info('cant\'t get code image!')
				return False
			else:
				# keep a copy of every captcha image for offline analysis/training
				open('/home/kevin/data_bk/python/postgetter-app/scimgdata/%f.gif'%(time.time(),),'wb').write(r)
				rawim=GifImagePlugin.GifImageFile(StringIO(r))

				rawim.seek(0)
				try:
					im=rawim.convert('1')
				except IOError:
					# PIL may fail on the first decode of a partially-read GIF; retry once
					im=rawim.convert('1')
				# dump an ascii rendering of the captcha into the log for debugging
				toout=''.join(map(lambda x: ' ' if x==255 else '.',im.getdata()))
				w,h=im.size
				self.logger.info('the image file:\n%s','\n'.join([toout[i:i+w] for i in range(0,w*h,w)]) )
				codenumber=CGetImg(os.path.expanduser('~/tmp.cookie')).doWork(rawim)
				self.logger.info('got codenumber: %s',codenumber)

			data['code']=codenumber # the solved captcha code
			r,rurl,_=self._getResponse(forum.login_url.encode(self.dft_html_encoding),data)
			if not r:
				self._signed_in=False
			else:
				r=self._tryDecode(r)

				# the response page contains u'个人设置' ("personal settings") only when logged in
				if r.find(u'个人设置')!=-1:
					self.cj.save(self.cookie_file)
					self._signed_in=True
		else:
			# BUGFIX: was self.name, an attribute never set on this class
			self.logger.debug('can\'t find forum %s, can\'t do login !',self.forum_name)

		return self._signed_in

	@chkLogin
	def getPostList(self,forum_name,sector_name,time_since,page_start,page_stop):
		'''Walk the topic-list pages [page_start, page_stop) of one sector.

		Creates Post rows for new topics (fetching their content through
		getPost) and refreshes existing ones whose reply count changed.
		Stops early when a non-sticky topic older than time_since is seen,
		or when the exit event is set.
		Raises Forum.DoesNotExist / Sector.DoesNotExist when the DB lacks
		the forum/sector configuration.
		'''
		self.forum_name=forum_name # kept so login() can look up credentials

		upd=None
		cnt=0
		try:
			forum=Forum.objects.filter(name=forum_name)[0:1].get()
		except Forum.DoesNotExist:
			self.logger.debug('can\'t find forum info of %s!',forum_name)
			raise

		try:
			sector=Sector.objects.filter(forum=forum,name=sector_name)[0:1].get()
		except Sector.DoesNotExist:
			self.logger.debug('can\'t find sector info of %s!',sector_name)
			raise

		for i in xrange(page_start,page_stop): # max page number
			gotnew_this_page=False # whether this page yielded a new post / updated reply, enabling an early exit on quiet pages

			if self.exitevent.is_set():
				self.logger.info('got exit signal!')
				break

			pageurl=sector.url%(i,)
			data=self._getData(pageurl,None,'%s-page %02d'%(sector.name,i))
			if len(data)<100: # the bbs is down or returned an error stub
				self.logger.info('bbs down? %s',data)
				break

			# select the topic rows using the sector's configured xpath
			parser=etree.HTMLParser()
			tree=etree.fromstring(data,parser)
			el=tree.xpath(sector.url_pattern)

			for item in el: # each topic in the list
				if self.exitevent.is_set():
					self.logger.info('got exit signal!')
					break
				# the folder icon name tells whether the topic is sticky ("keep top")
				keeptop= True if 'folderstate3' in item[0].xpath('img/@src')[0] else False
				if keeptop:
					gotnew_this_page=True # don't quit early when a page holds only unchanged sticky topics

				try:
					# the title anchor may be wrapped in <font>/<b> depending on styling
					try:
						title=item[3].xpath('table/tr/td/a/text()')[0]
					except IndexError:
						try:
							title=item[3].xpath('table/tr/td/a/font/b/text()')[0]
						except IndexError:
							title=item[3].xpath('table/tr/td/a/font/text()')[0]

					# topic metadata
					title_url=item[3].xpath('table/tr/td/a/@href')[0]
					m=re.search('TopicID=(\d+)',title_url)
					if m:
						lid=m.group(1)
					else:
						self.logger.debug('can\'t find locate_id for post %s',title)
						lid=''

					try:
						author=item[6].xpath('a/text()')[0]
					except IndexError:
						author='' # BUGFIX: was "author==''", a no-op comparison leaving author unbound
					crt_date=item[6].xpath('@title')[0][5:]
					nr_reply=int(item[5].text)

					pages= int(ceil((nr_reply)/float(self.NR_REPLY_PER_PAGE)) )

					upd_date=item[8].text.strip()
					cnt+=1
					self.logger.info('%s\n%02d)\t%s|%s|%d-%d|%s|%s', '-='*10, cnt, lid, upd_date, nr_reply, pages, author, title.strip())
					try:
						upd=datetime.datetime.strptime(upd_date,'%Y-%m-%d %H:%M:%S')
					except ValueError:
						# some rows carry only a date, no time part
						self.logger.debug('upd_date=%s,no H:M:S?',upd_date)
						upd=datetime.datetime.strptime(upd_date,'%Y-%m-%d')
					if time_since and upd<time_since and (not keeptop):
						self.logger.info('%s\ngot time stop in page list.','-~'*20)
						return

					if author=='':
						author='usr for post %s'%(lid,) # BUGFIX: was post.lid, but 'post' is undefined here

					# check DB, insert if not exist, check update otherwise
					try:
						p=Post.objects.filter(sector=sector,locate_id=lid)[0:1].get()
					except Post.DoesNotExist:
						# store nr_reply=0 first, so a failed fetch can be retried on the next run
						p=Post(sector=sector,locate_id=lid,url=title_url,title=title,author=author,
							crt_date=crt_date,upd_date=upd_date,nr_reply=0)
						p.save()
						self.stat_post_add+=1
						p.nr_reply=nr_reply
						self.logger.debug('post %s created.',lid)
						self.getPost(p)
						gotnew_this_page=True
					else:
						if p.upd_date!=upd:
							p.upd_date=upd_date
							p.save()
						if p.nr_reply!=nr_reply:
							self.logger.info('post %s nr_reply changed. %+d',lid,nr_reply-p.nr_reply)
							self.getPost(p,nr_reply)
							gotnew_this_page=True
				except IndexError:
					# rows censored by the forum's keyword filter lack the expected cells
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						continue
					else:
						raise

#-#			if not gotnew_this_page:
#-#				self.logger.info('%s\nno new/updated post/reply in this page.','-~'*20)
#-#				return

	def getPost(self,post,new_nr=0):
		'''Fetch a post's pages; the page range is derived from the reply count
		shown in the topic list (post.nr_reply / new_nr).'''
		cnt=0
		sr=urlparse.urlsplit(post.url)
		if not sr.scheme:
			posturl=post.sector.base_url+post.url
		else:
			posturl=post.url

		posturl+='&TopicPage=%d'
		# the _safe variant skips the forum's interstitial page
		posturl=posturl.replace('topicdisplay.asp','topicdisplay_safe.asp')

		startpage,stoppage=self.getPageRange(self.NR_REPLY_PER_PAGE,0 if post.nr_reply-1<0 else post.nr_reply-1,0 if new_nr==0 else new_nr-1)

		self.logger.debug('post %s page range [%d,%d)',post.locate_id,startpage,stoppage)
		parser=etree.HTMLParser()
		for pg in xrange(startpage,stoppage):
			if self.exitevent.is_set():
				self.logger.info('got exit signal!')
				break
			self.logger.debug('post %s %s ...',post.locate_id,posturl%pg)
			data=self._getData(posturl%pg,None,post.title)

			tree=etree.fromstring(data,parser)

			posts=tree.xpath('//table[@class="maintable"][1]/tr[position()>1]')
			for item in posts:
				try:
					author=item[0].xpath('a/text()')[0]
				except IndexError:
					self.logger.debug('no author info?')
					author=''

					# a censored row has no author cell either; skip it entirely
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						continue

				try:
					crt_date=item[1].xpath('font/text()')[0][6:-1]
					# detect the topic post itself: it has no ReplyID to extract
					tmp=item[1].xpath('*[position()<5]')
					if len(tmp)==4 and [x.tag for x in tmp]==['b','hr','font','hr']: # topic post
						replyid=0
						try:
							realtitle=item[1].xpath('b/text()')[0]
						except IndexError:
							try:
								realtitle=item[1].xpath('b/font/b/text()')[0]
							except IndexError:
								realtitle=item[1].xpath('b/font/text()')[0]

						if post.title!=realtitle:
							self.logger.info('post %s realtitle %s',post.locate_id,realtitle)
							post.title=realtitle
							post.save()
						# strip non-content nodes (title/separator/post date) before extracting the body
						for x in item[1].xpath('*[position()<5]'):
							item[1].remove(x)
						item[1].text=''
					else: # a reply
						replyurl=item[1].xpath('font/a[1]/@href')[0]
						replyid=re.search('ReplyID=(\d+)',replyurl).group(1)
						# strip non-content nodes (separator/post date) before extracting the body
						for x in item[1].xpath('*[position()<3]'):
							item[1].remove(x)

					replycontent=self.exclude_first_td_tag.match(htmlentitydecode(etree.tostring(item[1])).strip()).group(1)
					if replycontent.startswith('<br/>'):
						replycontent=replycontent[5:]

					if author=='':
						author='usr for %d-%s'%(post.id,replyid)

					try:
						r=Reply.objects.filter(post=post,locate_id=replyid)[0:1].get()
					except Reply.DoesNotExist:
						r=Reply(post=post,locate_id=replyid,crt_date=crt_date,author=author,content=replycontent)
						try:
							r.save()
						except _mysql_exceptions.Warning:
							# mysql rejected the decoded content; retry with the raw (undecoded) markup
							self.logger.debug('got _mysql_exceptions.Warning!')
							r.content=self.exclude_first_td_tag.match(etree.tostring(item[1]).strip()[:-5]).group(1)
							if r.content.startswith('<br/>'): # BUGFIX: was checking/stripping replycontent instead of r.content
								r.content=r.content[5:]
							r.save()

						cnt+=1
				except IndexError:
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						continue
					else:
						raise
				except AttributeError:
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						continue
					else:
						raise


		if new_nr!=0:
			if post.nr_reply+cnt==new_nr:
				self.logger.info('post %s %+d reply. now %d',post.locate_id,cnt,new_nr+1)
				post.nr_reply+=cnt # add only the actual number of new replies fetched
			else:
				self.logger.debug('post %s %+d reply, %d != expect %d (no right new_nr info?)',post.locate_id,cnt,post.nr_reply+cnt,new_nr)
				# fall back to the real count stored in the DB
				actualcnt=Reply.objects.filter(post=post).count()
				self.logger.info('post %s actual %d reply in DB',post.locate_id,actualcnt)
				post.nr_reply=actualcnt-1 if actualcnt-1>=0 else 0
		else:
			if post.nr_reply+1==cnt:
				self.logger.info('post %s init %+d reply.',post.locate_id,cnt)
			else:
				self.logger.info('post %s init %+d reply, != expect %d',post.locate_id,cnt,post.nr_reply+1)
			# store (fetched - 1) so the next run re-checks the last page for new replies
			post.nr_reply=cnt-1 if cnt-1>=0 else 0
		post.save()
		self.stat_reply_add+=cnt


	def getPosts(self):
		'''Re-fetch specific posts whose ids are listed in a text file.

		Used for special cases only, e.g. posts whose actual reply count is
		larger than the topic list shows, whose last-reply date disagrees with
		the topic list, or which contain filtered keywords that truncated an
		earlier fetch.'''
		from django.db.models import Count
		cnt=1
		for x in open('/home/kevin/table_test1.txt').read().split():
			self.logger.debug('%d) post %s ...',cnt,x)
			try:
				# BUGFIX: was filter(id=x)[0], which raises IndexError (never
				# Post.DoesNotExist) on a missing id, making the except unreachable
				p=Post.objects.filter(id=x)[0:1].get()
			except Post.DoesNotExist:
				self.logger.debug('post %s doesn\'t exists!',x)
				continue
			else:
				act_nr=Reply.objects.filter(post=p).aggregate(a=Count('id'))['a']
				self.logger.debug('post %s actual nr %d',x,act_nr)
				self.getOnePostSmart(p,1)
			cnt+=1


	def getOnePostSmart(self,post,from_page=1):
		'''Fetch a post's pages, paging until the "next page" link disappears.
		More reliable than getPost, which trusts the reply count shown in the
		topic list.'''
		cnt=0
		sr=urlparse.urlsplit(post.url)
		if not sr.scheme:
			posturl=post.sector.base_url+post.url
		else:
			posturl=post.url

		posturl+='&TopicPage=%d'
		# the _safe variant skips the forum's interstitial page
		posturl=posturl.replace('topicdisplay.asp','topicdisplay_safe.asp')

		parser=etree.HTMLParser()
		pg=from_page
		while True:
			if self.exitevent.is_set():
				self.logger.info('got exit signal!')
				break
			self.logger.debug('post %s %s ...',post.locate_id,posturl%pg)
			data=self._getData(posturl%pg,None,post.title)

			tree=etree.fromstring(data,parser)

			posts=tree.xpath('//table[@class="maintable"][1]/tr[position()>1]')
			haspostinpage=False
			gotfuckingword=False
			for item in posts:
				haspostinpage=True
				try:
					author=item[0].xpath('a/text()')[0]
				except IndexError:
					self.logger.debug('no author info?')
					author=''

					# a censored row has no author cell either; skip it entirely
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						gotfuckingword=True
						continue

				try:
					crt_date=item[1].xpath('font/text()')[0][6:-1]
					# detect the topic post itself: it has no ReplyID to extract
					tmp=item[1].xpath('*[position()<5]')
					if len(tmp)==4 and [x.tag for x in tmp]==['b','hr','font','hr']: # topic post
						replyid=0
						try:
							realtitle=item[1].xpath('b/text()')[0]
						except IndexError:
							try:
								realtitle=item[1].xpath('b/font/b/text()')[0]
							except IndexError:
								realtitle=item[1].xpath('b/font/text()')[0]

						if post.title!=realtitle:
							self.logger.debug('post %s realtitle %s',post.locate_id,realtitle)
							post.title=realtitle
						# strip non-content nodes (title/separator/post date) before extracting the body
						for x in item[1].xpath('*[position()<5]'):
							item[1].remove(x)
						item[1].text=''
					else: # a reply
						replyurl=item[1].xpath('font/a[1]/@href')[0]
						replyid=re.search('ReplyID=(\d+)',replyurl).group(1)
						# strip non-content nodes (separator/post date) before extracting the body
						for x in item[1].xpath('*[position()<3]'):
							item[1].remove(x)

					replycontent=self.exclude_first_td_tag.match(htmlentitydecode(etree.tostring(item[1])).strip()).group(1)
					if replycontent.startswith('<br/>'):
						replycontent=replycontent[5:]

					if author=='':
						author='usr for %d-%s'%(post.id,replyid)

					try:
						r=Reply.objects.filter(post=post,locate_id=replyid)[0:1].get()
					except Reply.DoesNotExist:
						r=Reply(post=post,locate_id=replyid,crt_date=crt_date,author=author,content=replycontent)
						try:
							r.save()
						except _mysql_exceptions.Warning:
							# mysql rejected the decoded content; retry with the raw (undecoded) markup
							self.logger.debug('got _mysql_exceptions.Warning!')
							r.content=self.exclude_first_td_tag.match(etree.tostring(item[1]).strip()[:-5]).group(1)
							if r.content.startswith('<br/>'): # BUGFIX: was checking/stripping replycontent instead of r.content
								r.content=r.content[5:]
							r.save()

						cnt+=1
				except IndexError:
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						gotfuckingword=True
						continue
					else:
						raise
				except AttributeError:
					if '浏览的页面或提交的数据包含敏感关键词信息,该关键词已经被过滤' in  htmlentitydecode(etree.tostring(item)):
						self.logger.info('got page contains words! %s ',htmlentitydecode(etree.tostring(item)))
						gotfuckingword=True
						continue
					else:
						raise

			# page on while a "next page" link ('[>]') exists, or a censored
			# row may have hidden it on this page
			x=tree.xpath('//td[@class="outtd"][1]/table[2]/tr[1]/td[2]/a')
			if ('[>]' in [t.text for t in x]) and haspostinpage:
				pg+=1
			elif gotfuckingword:
				pg+=1
			else:
				break


		self.logger.debug('post %s %+d reply',post.locate_id,cnt)
		# sync nr_reply with the real count stored in the DB
		actualcnt=Reply.objects.filter(post=post).count()
		self.logger.debug('post %s actual %d reply in DB',post.locate_id,actualcnt)
		post.nr_reply=actualcnt-1 if actualcnt-1>=0 else 0
		post.save()

	def printStat(self):
		'''Log the totals of posts and replies added during this run.'''
		self.logger.info('post add %d, reply add %d',self.stat_post_add,self.stat_reply_add)


# Bootstrap Django's ORM for standalone use before importing the models.
os.environ['DJANGO_SETTINGS_MODULE']='postgetter.settings'
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# IMPORTANT: to use Django's ORM standalone, create a directory btView containing
#  an empty __init__.py and a models.py; Django will then look up the tables
#  btView_Video and btView_Screenshot in the database. btView is the app_name of
#  another Django application created for this purpose.
# On using Django's ORM standalone see http://wiki.woodpecker.org.cn/moin/UsingDjangoAsAnStandaloneORM
from postgetter.getpost.models import Forum,Sector,Post,Reply,ImgUrl,ImgFile
from django.db.models import Max,Min,Count,Avg
from django.db import transaction

if __name__=='__main__':
	# Python 2 workaround: make utf-8 the process-wide default encoding.
	reload(sys)
	sys.setdefaultencoding('utf-8')

	# django will set env['TZ'] according to the settings.py of django project
	logging.info('Time Zone: %s',os.environ['TZ']) #	os.environ['TZ']='Asia/Shanghai'


	# how far back (in days) to scan the topic lists
	day=6

	pg=SbzPostGetter(os.path.join(os.path.dirname(os.path.abspath(__file__)),'sbz_cookie.dat'))
	# scan pages 1..39 of each configured sector, stopping at topics older than 'day' days
	pg.doWork('sbz','sbz-自然科学',
##								datetime.datetime.today()-datetime.timedelta(minutes=60),
##								datetime.datetime.today()-datetime.timedelta(hours=25),
								datetime.datetime.today()-datetime.timedelta(days=day),
								1,40)
	time.sleep(1)
	pg.doWork('sbz','sbz-古代战争',
##								datetime.datetime.today()-datetime.timedelta(minutes=60),
##								datetime.datetime.today()-datetime.timedelta(hours=25),
								datetime.datetime.today()-datetime.timedelta(days=day),
								1,40)
	pg.printStat()

	logging.debug('done')
	raw_input('press enter to exit ...')
