#!/usr/bin/python
#coding=utf-8

import logging
import os
import shutil
import sys
import datetime
import os.path
import re
from cStringIO import StringIO
from PIL import GifImagePlugin, Image, ImageFilter, ImageEnhance
import urlparse
import hashlib
import cookielib
import urllib2
from pprint import pprint
import socket
import urllib
import _mysql_exceptions
from multiprocessing import Process, Queue, Pool, Lock, get_logger
from multiprocessing.managers import SyncManager
import errno
from Queue import Full
import json
import ConfigParser
import codecs
from optparse import OptionParser
from PostGetter import PostGetter, htmlentitydecode, ContentEncodingProcessor, chkLogin
import httplib

# Root logger configuration: terse '<funcName> <message>' lines for interactive
# runs (richer formats kept below, commented out, for debugging sessions).
logging.basicConfig(level=logging.DEBUG,
##	format='%(thread)d %(asctime)s %(funcName)s %(message)s',
##	format='%(asctime)s %(name)s %(levelname)s %(funcName)s %(message)s',
	format='%(funcName)s %(message)s',
	datefmt = '%H:%M:%S')


class getImgFile(PostGetter):
	u'''Collect <img> URLs from forum replies and download the image files.

	URL metadata lives in the django model ImgUrl, file metadata in ImgFile.
	Downloaded files are stored under self.imgfile_base_path, sharded into
	sub-directories named after the first two hex chars of the file's md5.
	Downloading can optionally fan out to worker processes; shared state
	(bad-domain list, proxy-domain list, global counter) is then held by a
	multiprocessing SyncManager.
	'''

	def __init__(self, cookie_file, imgfile_base_path):
		'''
		@cookie_file: cookie file path, handed through to PostGetter.
		@imgfile_base_path: existing base directory for downloaded image files.
		'''
		PostGetter.__init__(self, cookie_file)
		self.imgfile_base_path = os.path.expanduser(imgfile_base_path)
		assert os.access(self.imgfile_base_path, os.F_OK) == True

		# map PIL image format name (Image.format, lower-cased) to file extension
		self.fileext = {'jpeg': 'jpg', 'png': 'png', 'gif': 'gif', 'jpg': 'jpg', 'bmp': 'bmp', 'tiff': 'tif'}
		# extract the src attribute from <img ... /> tags
		self.pImg = re.compile(r'<img.*?\s+src\s*=\s*"\s*(?!")(.+?)\s*".*?/>', re.U | re.M | re.S | re.I)
		self.bad_domain = []		# domains that failed DNS/connection; skipped on later runs
		self.use_proxy = []		# domains that are only reachable through a proxy
		self.nms = None			# SyncManager Namespace holding the shared counter mcnt
		self.pDomainUseProxy = []	# compiled regexes built from self.use_proxy
		self.multiprocess_manager = None
		# The locks are only created in getImage() when worker processes are
		# used; pre-set them to None so the single-process code paths in
		# _getImageFromNet() / onURLError_NoName() can test them safely
		# (they previously raised AttributeError when nr_process was 0).
		self.lck4baddomain = None
		self.lck4useproxy = None
		self.lck4mcnt = None


	def getImageUrl(self, forum_name, sector_name, id_from=None, nr_limit=None):
		u'''Extract <img> links from reply contents and store them in ImgUrl.

		@forum_name, @sector_name: select which sector's replies to scan.
		@id_from: optional reply id to start from; otherwise resume from
			self.processed_maxid (loaded by loadCfg) if set, else scan the
			sector's whole id range.
		@nr_limit: optional cap on the number of replies to process.
		'''
		cnt, tmpminid, step = 0, 0, 5000
		sector = Sector.objects.get(forum__name__iexact=forum_name, name__iexact=sector_name)
		if id_from:
			tmpminid = id_from
			maxid = Reply.objects.filter(post__sector=sector).aggregate(maxid=Max('id'))['maxid']
		elif self.processed_maxid:
			self.logger.info('max id processed last time is %d', self.processed_maxid)
			tmpminid = self.processed_maxid  # max reply id already handled in the previous run
			maxid = Reply.objects.filter(post__sector=sector).aggregate(maxid=Max('id'))['maxid']
		else:
			tmp = Reply.objects.filter(post__sector=sector).aggregate(minid=Min('id'), maxid=Max('id'))
			tmpminid, maxid = tmp['minid'], tmp['maxid']

		self.logger.info('%s\n\tget img url from %s minid,maxid=%d,%d\n\n', '~~~~'*20, sector_name, tmpminid, maxid)
		itertimes = 0
		# walk the id range in windows of @step rows to keep querysets small
		while (not nr_limit) or (nr_limit and cnt <= nr_limit):
			self.logger.info('%s%d) cnt=%d, tmpminid=%d %s', '-='*15, itertimes, cnt, tmpminid, '-=' * 15)
			rl = Reply.objects.defer('locate_id', 'author', 'crt_date').filter(
				post__sector = sector, id__gte = tmpminid, id__lt = tmpminid+step).order_by('id').iterator()

			for reply in rl:
				cnt += 1

				if nr_limit and nr_limit <= cnt:
					self.logger.info('to nr_limit ! %d', reply.id)
					break

				self._getImg4Reply(reply)
				self.processed_maxid = reply.id

				if self.exitevent.is_set():
					self.logger.info('got exit signal!')
					break

			if self.exitevent.is_set():
				self.logger.info('got exit signal!')
				break

			if nr_limit and nr_limit <= cnt:
				self.logger.info('to nr_limit !')
				break

			if tmpminid > maxid:
				self.logger.info('to maxid! %d', tmpminid)
				break

			itertimes += 1
			tmpminid += step


	def _getImg4Reply(self, reply):
		u'''Extract image links from one reply's content and save them to ImgUrl.'''
		imglist = self.pImg.findall(reply.content)
		dbg_title_prted = False
		premovetag = re.compile('(<.*?>)', re.M | re.S)
		if imglist:
			for i, x in enumerate(imglist):
				# unquote sequences like %E5%88%98%E5%BC%BA embedded in the url
				try:
					x = re.sub('(?:(?:%[0-9A-Za-z]{2,3})+)+?', lambda xx: self._tryDecode(urllib.unquote(xx.group().encode('gbk'))), x)
				except UnicodeDecodeError as e:
					self.logger.info('reply %d except: %s\n\t%s', reply.id, e, x)
					continue

				# skip urls exceeding the db field length limit
				if len(x) > 255:
					self.logger.debug('got long url for reply %d, %d', reply.id, len(x))
					continue

				# strip any stray tags captured inside the src value
				x = premovetag.sub('', x)

				# convert relative path to absolute url
				sr = urlparse.urlsplit(x)

				if sr.scheme == '':
					x = urlparse.urljoin(reply.post.sector.base_url, x)
				elif sr.scheme not in ('http', 'https'):
					self.logger.debug('reply %s bad url %s', reply.id, x)
					continue

				iurl, created = ImgUrl.objects.get_or_create(url=x, defaults={'img': None, 'stat': 0})
				if created:
					self.logger.debug('new imgurl %d for %s', iurl.id, iurl.url)

				# 'http://photo.sbanzu.com/album/photos_m/*' is a resized pic,
				# 'http://photo.sbanzu.com/album/photos/*' is the original pic,
				# so also register the original pic url if not present in DB
				if x.startswith('http://photo.sbanzu.com/album/photos_m/'):
					newx=iurl.url.replace('http://photo.sbanzu.com/album/photos_m/', 'http://photo.sbanzu.com/album/photos/')
					obj, created=ImgUrl.objects.get_or_create(url=newx, defaults={'img':None, 'stat':0})
					if created:
						self.logger.debug('new imgurl %d for %s [original]', iurl.id, newx)


	def loadCfg(self, inifile='getImg.ini', inifile_encoding='utf-8'):
		'''Load state from the ini file specified by @inifile:
		bad_domain / use_proxy lists and the max reply id processed last time.
		Creates the file with empty defaults if it does not exist yet.
		'''
		curdir = os.path.abspath('.')
		if not os.path.isabs(inifile):
			inifile = os.path.join(curdir, inifile)
		cfg = ConfigParser.SafeConfigParser()
		if not os.access(inifile, os.F_OK):
			codecs.open(inifile, 'w', inifile_encoding).write('[imageurl]\nbad_domain=[]\nuse_proxy=[]\n[reply_id]\nmaxid=0\n')

		cfg.readfp(codecs.open(inifile, 'r', inifile_encoding))
		self.bad_domain = json.JSONDecoder().decode(cfg.get('imageurl', 'bad_domain'))
		self.use_proxy = json.JSONDecoder().decode(cfg.get('imageurl', 'use_proxy'))
		self.pDomainUseProxy=[ re.compile(x, re.S|re.I) for x in self.use_proxy ]
		self.processed_maxid=cfg.getint('reply_id', 'maxid')


	def saveCfg(self, inifile='getImg.ini', inifile_encoding='utf-8'):
		'''Save state to the ini file specified by @inifile:
		bad_domain / use_proxy lists and the max reply id processed this time.
		'''
		curdir = os.path.abspath('.')
		if not os.path.isabs(inifile):
			inifile = os.path.join(curdir, inifile)
		cfg = ConfigParser.SafeConfigParser()
		cfg.readfp(codecs.open(inifile, 'r', inifile_encoding))
		# one domain per line in the ini file for readability
		cfg.set('imageurl', 'bad_domain', json.JSONEncoder(ensure_ascii = False, separators = (',', ':')).encode(self.bad_domain).replace(',"', ',\n"'))
		self.use_proxy = list(set(self.use_proxy))
		# sort by reversed domain components so related hosts group together
		self.use_proxy.sort(key=lambda x: '.'.join(reversed(x.split('.'))) )
		cfg.set('imageurl', 'use_proxy', json.JSONEncoder(ensure_ascii = False, separators = (',', ':')).encode(self.use_proxy).replace(',"', ',\n"'))
		cfg.set('reply_id','maxid', str(self.processed_maxid))
		# close explicitly instead of relying on GC to flush the file
		fo = codecs.open(inifile, 'w', inifile_encoding)
		cfg.write(fo)
		fo.close()


	def getImage(self, id_from=None, nr_limit=None, nr_process=5):
		u'''Walk the ImgUrl table and fetch the actual image files.

		@id_from: optional ImgUrl id to start from.
		@nr_limit: optional cap on the number of urls to examine.
		@nr_process: number of worker processes; 0/None means fetch inline
			in this process.
		'''
		info, debug = self.logger.info, self.logger.debug
		docnt = 0

		if nr_process:
			queue = Queue(nr_process*3)
			info('creating worker processes (%d)...',nr_process)
			self.loadCfg()
			self.setSocket(5, 1)
			if not self.multiprocess_manager:
				self.multiprocess_manager = SyncManager()#SyncManager(('',58585))
			self.multiprocess_manager.start()
			# promote plain lists to manager proxies shared with the workers
			self.bad_domain = self.multiprocess_manager.list(self.bad_domain)
			self.use_proxy = self.multiprocess_manager.list(self.use_proxy)
			self.lck4baddomain = self.multiprocess_manager.Lock()
			self.lck4useproxy = self.multiprocess_manager.Lock()
			self.lck4mcnt = self.multiprocess_manager.Lock()
			# logger for multiprocess
			self.mlog=get_logger()
			mhandler=logging.StreamHandler()
			mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s %(message)s', '%H:%M:%S'))
			self.mlog.addHandler(mhandler)
			self.mlog.setLevel(logging.INFO)
			# event telling subprocesses to exit on their own initiative
			shutdown= self.multiprocess_manager.Event()
			# namespace for the global counter, etc.
			self.nms=self.multiprocess_manager.Namespace()
			self.nms.mcnt=0

			processes = []
			for i in range(nr_process):
				proc = Process(target=self.getImageProcess, name='worker-%d'%i,
				               args=(queue, shutdown, 'worker-%d'%i))
				processes.append(proc)
				proc.daemon = True
				proc.start()

		cnt, tmpminid, step = 0, 0, 500
		if id_from:
			tmpminid = id_from
			tmp = ImgUrl.objects.aggregate(maxid = Max('id'))
			maxid= tmp['maxid'] if tmp['maxid'] else 0
		else:
			# only urls not yet resolved to a file and not marked 404 (stat=4)
			tmp = ImgUrl.objects.filter(
				img__isnull=True).exclude(
				stat__exact=4).aggregate(minid = Min('id'), maxid = Max('id'))
			tmpminid, maxid = tmp['minid'] if tmp['minid'] else 0, tmp['maxid'] if tmp['maxid'] else 0

		info('%s\n\tget img from net, minid,maxid=%d,%d\n\n', '~~~~'*20,tmpminid, maxid)
		itertimes = 0
		while ( not nr_limit) or cnt <= nr_limit:
			info('%s%d) cnt=%d, tmpminid=%d %s', '-='*15, itertimes, cnt, tmpminid, '-='*15)

			il=ImgUrl.objects.filter(id__gte=tmpminid,id__lt=tmpminid+step).filter(
			  img__isnull=True).exclude(
				stat__exact=4).order_by('id').iterator()

			for iurl in il:
				notok = True
				cnt += 1

				if nr_limit and nr_limit <= cnt:
					info('to nr_limit ! %d', iurl.id)
					break

				# skip domains already known to be unreachable
				sr = urlparse.urlsplit(iurl.url)
				if sr.netloc:
					if nr_process:
						with self.lck4baddomain:
							if sr.netloc in self.bad_domain:
								continue
					else:
						if sr.netloc in self.bad_domain:
							continue

				docnt += 1
				if nr_process:
					queue.put((iurl.id,iurl)) # blocking put into the work queue
				else:
					self._getImageFromNet(iurl)

				if self.exitevent.is_set():
					info('got exit signal!')
					break

			if self.exitevent.is_set():
				info('got exit signal!')
				break

			if nr_limit and nr_limit<=cnt:
				info('to nr_limit !')
				break

			if tmpminid>maxid:
				info('to maxid! %d',tmpminid)
				break
			itertimes+=1
			tmpminid+=step

		if nr_process:
			info('closing worker processes ...')
			shutdown.set()
			# push one sentinel per worker; Full is fine since shutdown is set
			for _ in range(len(processes)):
				try:
					queue.put((-1,None), False)
				except Full:
					pass
			for p in processes:
				p.join()

			# convert manager proxies back to plain lists before persisting
			self.bad_domain=list(self.bad_domain)
			self.use_proxy=list(self.use_proxy)
			self.saveCfg()
			if self.multiprocess_manager:
				self.multiprocess_manager.shutdown()
				self.multiprocess_manager=None


	def _getImageFromNet(self,imgurl):
		u'''Fetch the file behind imgurl.url, store it under the sharded image
		directory and record its metadata in ImgFile, linking imgurl to it.
		'''
		# known url rewrites for moved/mirrored hosts
		url=imgurl.url
		if url.startswith(u'http://www.qbq.cn/uploadfile/'):
			url=url.replace(u'http://www.qbq.cn/uploadfile/', u'http://www.small-arms.org/bbs/uploadfile/')
			self.logger.debug('url map to %s',url)
		url=url.replace('<br>','').strip()
		url=url.replace('</br>','').strip()
		url=url.replace(u'58.49.58.159/',u'59.173.12.109:8181/')
		url=url.replace(u'58.49.58.159:8181/',u'59.173.12.109:8181/')

		if url.startswith(u'http://photo.sbanzu.com/album/'):
			url=url.replace(u' ',u'')

		# set Referer header to the url's own site root
		sr=urlparse.urlsplit(url)
		if sr.scheme in ('http','https') and sr.netloc:
			referer=sr.scheme+'://'+sr.netloc+'/'
		else:
			referer=''

		kwargs={}
		newproxy=False
		# some domains are only reachable through a proxy
		if sr.netloc and any((x.search(sr.netloc) for x in self.pDomainUseProxy)):
			self.logger.debug('use proxy for %s',sr.netloc)
			kwargs['useproxy']=True
			newproxy=True
		# quote the path part only, so urls with non-ascii paths work, e.g.
		#  '''http://www.espanolsinfronteras.com/imágenes/Índice de Biografías - Claudio Coello - Carlos II.jpg'''
		idx=url.find(sr.netloc)
		url=url[:idx]+urllib.quote(url[idx:].encode(self.dft_img_encoding))

		trytimes=2
		for _ in range(trytimes):
			self.logger.info('%sgetting %d  %s %s...','%d/%d '%(_+1,trytimes) if _!=0 else '',imgurl.id,imgurl.url,'[proxy]' if 'useproxy' in kwargs else '')
			try:
				r,rurl,code=self._getResponse(url, headers={'Referer':referer}, **kwargs)
			except httplib.BadStatusLine:
				r,rurl,code=None, url,  self.HTTPLIB_BAD_STATUS_LINE

			if code in (404,301,302,errno.ENOENT):
				imgurl.stat=4
			elif code in (self.SOCKET_CONN_TIMED_OUT, self.SOCKET_RECV_TIMED_OUT, socket.EAI_NONAME):
				imgurl.stat=3
				if 'useproxy' not in kwargs:
					self.logger.debug('try use proxy for %s',sr.netloc)
					kwargs['useproxy']=True
					continue	# retry once through the proxy
			elif code in (errno.ECONNREFUSED, errno.ECONNRESET, errno.EHOSTUNREACH):
				imgurl.stat=3
				if 'useproxy' not in kwargs:
					kwargs['useproxy']=True
					continue	# retry once through the proxy
			else:
				imgurl.stat=2
			break

		# the proxy was not pre-configured for this domain but the retry through
		# it produced a response (even a 404 counts) -- remember the domain
		if (r or imgurl.stat==4) and (not newproxy) and ('useproxy' in kwargs):
			if self.lck4useproxy:
				with self.lck4useproxy:
					if sr.netloc not in self.use_proxy:
						self.logger.debug('new proxy domain: %s',sr.netloc)
						self.use_proxy.append(sr.netloc)
						self.pDomainUseProxy.append(re.compile(sr.netloc,re.S|re.I))
						# NOTE: hard-coded audit file path
						open('/home/kevin/newproxy.txt','a').write('%s\n'%sr.netloc)
			else:
				if sr.netloc not in self.use_proxy:
					self.logger.debug('new proxy domain: %s',sr.netloc)
					self.use_proxy.append(sr.netloc)
					self.pDomainUseProxy.append(re.compile(sr.netloc,re.S|re.I))
					open('/home/kevin/newproxy.txt','a').write('%s\n'%sr.netloc)

		# bump the shared processed counter (multiprocess mode only)
		if self.nms and self.lck4mcnt:
			with self.lck4mcnt:
				self.nms.mcnt+=1

		if r:
			size=len(r)
			try:
				im=Image.open(StringIO(r))
			except IOError:
				self.logger.debug('\t return file size=%d',size)
				if size>100 and r.startswith('\xff\xd8\xff\xe0\x00\x10JFIF') and r[-2:]=='\xff\xd9':
					self.logger.info('got jpeg which PIL can\'t deal with directly. \n%s',repr(r[:100]))
					return
				imgurl.stat=4 # consider non-image payload as not found
				imgurl.save()
				tmp=self._tryDecode(r)
				if r.find('html',0,100)!=-1: # is html
					self.logger.debug('\t got html: %s',repr(tmp[:100]))
				else:
					self.logger.debug('\t got:%s',repr(tmp[:100]))
				return

			w,h=im.size
			ext=im.format.lower()
			md5s=hashlib.md5(r).hexdigest()
			imgf,created=ImgFile.objects.get_or_create(filemd5=md5s,defaults={'ext':ext,'size':size,'width':w,'height':h})
			if created:
				# save the file as <base>/<md5[:2]>/<md5>.<ext>
				path=os.path.join(self.imgfile_base_path,md5s[:2])
				filepath=os.path.join(path,'.'.join((md5s,self.fileext[ext])))
				try:
					open(filepath,'wb').write(r)
				except IOError as e:
					if e.errno==errno.ENOENT:
						os.mkdir(path)	# shard directory did not exist yet
						open(filepath,'wb').write(r)
					else:
						raise
				self.logger.info('\t%s%d %s saved ~~~~~', '%d) '%self.nms.mcnt if self.nms else '', imgurl.id, imgurl.url)
			else:
				# record already existed: same md5 but different metadata means
				# an md5 collision or corrupted data -- bail out loudly.
				# (this check used to sit in the 'created' branch comparing the
				# defaults just written, which could never fire)
				if (ext,size,w,h)!=(imgf.ext,imgf.size,imgf.width,imgf.height):
					raise Exception('\tgot md5 collided ? %s|%s|%d|%dx%d' % (md5s,imgf.ext,imgf.size,imgf.width,imgf.height))
				self.logger.info('\t%s%d %s already existed ~~~~~', '%d) '%self.nms.mcnt if self.nms else '', imgurl.id, imgurl.url)
			imgurl.img=imgf
			imgurl.save()

		else:
			self.logger.info('\t%scan\'t get %s ! %s', '%d) '%self.nms.mcnt if self.nms else '', imgurl.url, code)
			if imgurl.stat==4:
				imgurl.save()

	def getImageProcess(self, taskqueue, shutdown, proc_name):
		u'''Worker-process main loop: pull ImgUrl items off @taskqueue, fetch
		and store the image file, and update the related database records.
		Exits on the (-1, None) sentinel or when @shutdown is set.
		'''
		# drop the connection inherited via fork so django opens a fresh one
		connection.close()
		get=taskqueue.get
		self.logger=self.mlog
		info, debug=self.logger.info, self.logger.debug
		info('worker %s started ~',proc_name)
		getit=self._getImageFromNet
		debug('len(bad_domain)=%d, len(use_proxy)=%d, len(pDomainUseProxy)=%d', len(self.bad_domain),
		     len(self.use_proxy), len(self.pDomainUseProxy))

		while True:
			k,imgurl=get()
			if k==-1:
				info('%s worker process exit (-1).',proc_name)
				break
			getit(imgurl)
			if shutdown.is_set():
				break

		info('%s worker process exited.',proc_name)


	def onURLError_NoName(self,url):
		u'''Hook invoked when fetching @url fails with socket.EAI_NONAME
		(name resolution failure): record the domain as bad so it is skipped.
		Returns False so the caller does not retry.
		'''
		sr=urlparse.urlsplit(url)
		if sr.netloc:
			netloc=urllib.unquote(sr.netloc) # change  pic.qnpic.com%3A83 to  pic.qnpic.com:83
			if self.lck4baddomain:
				with self.lck4baddomain:
					if netloc not in self.bad_domain and (netloc not in ('photo.sbanzu.com',) ):
						self.logger.info('bad_domain add %s', netloc)
						self.bad_domain.append(str(netloc))
			else:
				if netloc not in self.bad_domain and (netloc not in ('photo.sbanzu.com',) ):
					self.logger.info('bad_domain add %s', netloc)
					self.bad_domain.append(str(netloc))

		return False


	def onConnection_Refused_Reset_HostUnreach(self,url):
		'''Connection refused/reset/unreachable gets the same treatment as a
		name-resolution failure: blacklist the domain, do not retry.'''
		return self.onURLError_NoName(url)


	def checkRemoveableFile(self, do_remove=False, move_to='/tmp'):
		u'''http://photo.sbanzu.com/album/photos_m/* files are thumbnails of
		the originals at http://photo.sbanzu.com/album/photos/*.
		Compute the total size (bytes) of thumbnails that could be removed;
		with @do_remove, repoint the thumbnail ImgUrl at the original image,
		delete orphaned ImgFile records and move (to @move_to) or unlink the
		thumbnail files.
		'''
		info, debug= self.logger.info, self.logger.debug

		tmp=ImgFile.objects.all().aggregate(totalsize= Sum('size'), totalcnt=Count('filemd5'))
		totalsize, totalcnt =tmp['totalsize'], tmp['totalcnt']
		debug('total: %d bytes, records cnt: %d', totalsize, totalcnt)
		tmp=ImgUrl.objects.filter(
			  img__isnull=False).filter(
		      url__startswith='http://photo.sbanzu.com/album/photos_m/').aggregate(processcnt=Count('id'))
		processcnt=tmp['processcnt']
		debug('should process records: %d', processcnt)

		ul=ImgUrl.objects.filter(
			  img__isnull=False).filter(
		      url__startswith='http://photo.sbanzu.com/album/photos_m/').order_by('id').iterator()

		cnt, canremovecnt, canremovesize = 0, 0, 0
		for item in ul:
			cnt+=1
			try: # look up the corresponding full-size image record
				fu=ImgUrl.objects.get(url__exact=item.url.replace('http://photo.sbanzu.com/album/photos_m/', 'http://photo.sbanzu.com/album/photos/'))
			except ImgUrl.DoesNotExist: # no full-size record at all
				debug('%d %s has no corresponding full img record!', item.id, item.img_id)
			else:
				if fu.img is not None: # full-size image was downloaded
					if fu.img!=item.img:  # thumbnail and full image differ
						# build the thumbnail's file path and check it exists
						path=os.path.join(self.imgfile_base_path,item.img.filemd5[:2])
						filepath=os.path.join(path,'.'.join((item.img.filemd5,self.fileext[item.img.ext])))
						if os.path.exists(filepath):
							canremovecnt+=1
							canremovesize+=item.img.size

							if do_remove:
								raw_input('should debug!')
								tmpimg=item.img
								# point the thumbnail url at the full image
								item.img=fu.img
								item.save()

								# if nothing else references the old img record,
								# delete it and its file
								if not ImgUrl.objects.filter(img=tmpimg).exists():
									item.img.delete()
									# move or remove the thumbnail file
									if move_to:
										shutil.move(filepath, move_to)
									else:	# unlink the thumbnail file
										os.remove(filepath)
						else: # thumbnail file missing: abnormal, just report
							debug('%s ==> %s does not exists !!!', item.url, filepath)
				else: # full-size image was never downloaded
					debug('%d %s corresponding full img(%d) is null', item.id, item.img_id, fu.id)

		info('total records: %d (%d bytes); \n\tprocessed records: %d, can remove %d bytes (%d records). \n\tsize percent: %.1f%%',
				totalcnt, totalsize, cnt, canremovesize, canremovecnt, float(canremovesize)*100/totalsize)








os.environ['DJANGO_SETTINGS_MODULE']='postgetter.settings'
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# IMPORTANT: to use django's ORM standalone, create a package btView containing
# an empty __init__.py and a models.py; django then resolves btView_Video /
# btView_Screenshot in the database (btView is the app_name of another django app).
# See http://wiki.woodpecker.org.cn/moin/UsingDjangoAsAnStandaloneORM
from postgetter.getpost.models import Forum,Sector, Post, Reply, ImgFile, ImgUrl
from django.db.models import Max, Min, Count, Avg, Sum
from django.db.models import Q, F
from django.db import transaction, IntegrityError
from django.db import connections
from django.db import connection
from postgetter.settings import DEBUG
if __name__=='__main__':
	parser=OptionParser()
	parser.add_option('-i','--id_from',type='int',dest='id_from',help='get img from imgurl.id',default=0)
	parser.add_option('-n','--nr_limit',type='int',dest='nr_limit',help='number of url to process',default=0)
	parser.add_option('-m','--nr_process',type='int',dest='nr_process',help='number of subprocess to get image',default=6)
	parser.add_option('-f','--forum_name',type='string',dest='forum_name',help='forum name',default='sbz')
	parser.add_option('-s','--sector_name',action='append',type='string',dest='sector_name',help='sector name',default=['sbz-自然科学','sbz-古代战争'])
	(options,args)=parser.parse_args()

	# python2 hack: make implicit str<->unicode conversions use utf-8
	reload(sys)
	sys.setdefaultencoding('utf-8')
	# TZ may legitimately be unset in the environment; don't crash on the log line
	logging.info('Time Zone: %s', os.environ.get('TZ', '(not set)')) #	os.environ['TZ']='Asia/Shanghai'

	o=getImgFile('','~/data_bk/bbsimgfile/')

	# resume from the max reply id processed in the previous run (from ini file)
	o.loadCfg()
	org_processed_maxid, new_processed_maxid = o.processed_maxid, 0
	for s in options.sector_name:
		o.getImageUrl(options.forum_name,s,org_processed_maxid)
		if new_processed_maxid<o.processed_maxid:
			new_processed_maxid=o.processed_maxid
	o.processed_maxid=new_processed_maxid
	o.saveCfg()

	o.getImage(options.id_from,options.nr_limit,options.nr_process)

	logging.debug('done')
	raw_input('press enter to exit ...')