#! /usr/bin/python
import urllib2,base64
import sys, os, StringIO
import re
from proxy import setup_proxy
from HTMLParser import HTMLParser
from torrentSpider import fetch_torrent
from imgSpider import ImgSpider
from common import *
import shutil #for rmtree
from ranges import Ranges

fetched = Ranges()

def img_local_name(url):#base32 of the part after 'http://'
	raw = url.lstrip().rstrip() # remove leading/trailing whitespaces
	if raw[0:7] == 'http://':
		raw = raw[7:]
	return base64.b32encode(raw) + '.jpg'
	# base64.b32decode(...)

# AVTaken "JAV Torrents" thread page
class AvtF4ThreadParser(HTMLParser):
	#when full: div, a/img
	tag_stack = []
	ahref = ''
	direct_img_links = []
	indirect_img_links = []
	torrent_links = []
	description = ''
	def handle_starttag(self, tag, attrs):
		if len(self.tag_stack)==0 and tag == 'div':
				for k,v in attrs:
					if k=='style' and v=='font-size: 12px':
						self.tag_stack.append('div')
						break
		elif len(self.tag_stack)==1:
			if tag=='img': #direct image
				for k,v in attrs:
					if k=='src':
						self.direct_img_links.append(v)
			elif tag=='a':
				self.tag_stack.append('a')
				for k,v in attrs:
					if k=='href':
						self.ahref = v
		elif len(self.tag_stack)==2:
			if tag=='img': #thumb
				self.indirect_img_links.append(self.ahref)

	def handle_endtag(self, tag):
		if len(self.tag_stack)>0 and tag==self.tag_stack[-1]:
			self.tag_stack.pop()
			if tag=='a':
				self.ahref = ''

	def handle_data(self, data):
		if len(self.tag_stack)==2 and self.tag_stack[-1]=='a':
			if 'http://' in data:
				self.torrent_links.append(data)
				return
		if len(self.tag_stack)>0:
			#if re.match(r'\s+', data):
			#	print '<%s>' % data
			#	data = ' '
			#awk '/[^\r\n ]/ {print $0}' out
			self.description = self.description + data
	def parse_thread(self, url):
		self.reset()
		ahref = ''
		self.tag_stack = []
		self.direct_img_links = []
		self.indirect_img_links = []
		self.torrent_links = []
		self.description = ''
		print 'Fetching Thread %s' %url
		html = urllib2.urlopen(url).read()
		print 'Parsing Thread %s' %url
		self.feed(html)
		return (self.description
				,self.torrent_links
				,self.direct_img_links
				,self.indirect_img_links)


# AVTaken "JAV Torrents" list page
class AvtF4ListParser(HTMLParser):
	tlist = []
	t = {'url':'', 'title':'', 'date':'', 'tid':None}
	date_found = False
	regex_url = re.compile(r'viewthread.php\?tid=([0-9]*)&')
	#5-1-2012, 22-12-2011
	regex_date = re.compile(r'[1-3]?[0-9]-1?[0-9]-20[0-9]{2}')
	def handle_starttag(self, tag, attrs):
		if tag == 'a':
			for k,v in attrs:
				if k == 'href':
					m = self.regex_url.search(v)
					if m != None:
						self.t['tid'] = int(m.group(1))
						self.t['url'] = 'http://avtaken.com/bbs/viewthread.php?tid='+m.group(1)
		elif tag == 'span':
			for k,v in attrs:
				if k=='class' and v=='smalltxt lighttxt':
					self.date_found = True

	def handle_data(self, data):
		if self.t['url']!='' and self.t['title']=='':
			self.t['title'] = data
		elif self.date_found and self.t['url']!='' and self.t['title']!='':
			if self.regex_date.match(data):
				self.t['date'] = data
				self.tlist.append(self.t)
				self.t = {'url':'', 'title':'', 'date':'', 'tid':None}
				self.date_found = False

	def fetch_threads(self):
		setup_proxy()
		self.tlist = []
		self.t = {'url':'', 'title':'', 'date':'', 'tid':None}
		self.date_found = False
		for i in range(4,8):
			url = 'http://avtaken.com/bbs/forumdisplay.php?fid=4&page=%d'%i
			print 'FETCHING %s...' %url
			html = urllib2.urlopen(url)
			print 'PARSING %s...' %url
			self.feed(html.read())
			self.close()
		print "####Finished retrieving threads' urls."
		return self.tlist

n_page = 0
def proc_thread(
		title,
		url,#perma_link
		desp,#description
		tlinks,# torrent_links
		dimgs,#direct_image_links
		iimgs):#indirect_image_links
	print 'PROCESSING THREAD: %s' % title
	torrents = [] # <hash>.torrent

	for t in tlinks:
		thash = fetch_torrent(t)
		if thash!='':
			torrents.append('%s.torrent' % thash)
	
	if len(torrents)==0:
		return False

	global n_page
	fhtml = open('pages/%d.html'%n_page, 'w+')
	fhtml.write('<html>')
	head = '''<head>
		<meta http-equiv="content-type" content="text/html; charset=UTF-8">
		<style>
			.footer{position:fixed; left:0px; right:0px; bottom:0px; width:100%;background-color:#888;}
			.container{margin-bottom:60px;}
			body{text-align:center}
		</style>
		</head>'''
	fhtml.write(head)
	fhtml.write('<body><div class="container"><h2>%s</h2>PermaLink:<a href="%s">%s</a><hr/>' % (title,url,url))
	fhtml.write('<pre>%s</pre>' % desp)
	for di in dimgs:
		img = img_local_name(di)
		fhtml.write('<img src="../images/%s" alt="%s" /><br/>' % (img,di))
	for ii in iimgs:
		img = img_local_name(ii)
		fhtml.write('<img src="../images/%s" alt="%s" /><br/>' % (img,ii))
	fhtml.write('</div><div class=footer>')
	fhtml.write('<a href="%d.html">Prev</a> | ' % (n_page-1))
	for t in torrents:
		fhtml.write('<a href="%s">Torrent</a> |' % ('../torrents/'+t))
	fhtml.write(' <a href="%d.html">Next</a>' % (n_page+1))
	fhtml.write('</div></body></html>')
	fhtml.close()
	n_page = n_page+1
	return True
	
def fetch():
	global err_file
	global fetched
	err_file.write('=====%s=====\n' % datetime.now().strftime('%y-%m-%d %H:%M:%S')) 
	lparser = AvtF4ListParser()
	tparser = AvtF4ThreadParser()
	imgSpider = ImgSpider()
	tlist = lparser.fetch_threads() # get threads list
	regex_avid = re.compile(r'([A-Za-z]+ *-?[0-9]+)|【')
	for t in tlist:
		if fetched.item_in(t['tid']):
			print 'Already fetched %d' % t['tid']
			continue
		desp,tlinks,dimgs,iimgs = tparser.parse_thread(t['url'])
		m = regex_avid.search(t['title'])
		if m==None:
			print 'Skipped thread: %s' % t['title']
			fetched.add_item(t['tid'])
			continue
		succeed = proc_thread(t['title'], t['url'], desp, tlinks, dimgs, iimgs)
		if succeed:
			for di in dimgs:
				img = img_local_name(di)
				imgSpider.crawl_image(di, img, True)
			for ii in iimgs:
				img = img_local_name(ii)
				imgSpider.crawl_image(ii, img, False)
		fetched.add_item(t['tid'])#whether succeed or not
	imgSpider.end_job()

def clean():
	"""Delete and recreate the output directories.

	Command line: `clean [all|image|torrent|page]`; defaults to 'all'
	when no option is given after the subcommand.
	"""
	argv = sys.argv
	option = argv[2] if len(argv) > 2 else 'all'

	def _reset_dir(path):
		# One directory: wipe and recreate. ignore_errors tolerates a
		# missing directory (e.g. first run) instead of crashing rmtree.
		shutil.rmtree(path, ignore_errors=True)
		os.mkdir(path)

	if option in ('all', 'image'):
		_reset_dir('images')
	if option in ('all', 'torrent'):
		_reset_dir('torrents')
	if option in ('all', 'page'):
		_reset_dir('pages')

def start_job():
	if sys.platform=='win32':
		print 'Windows is NOT supported.\n'
		return
	reload(sys)  
	sys.setdefaultencoding('utf-8')
	import signal
	signal.signal(signal.SIGINT, sigint_handler)
	global fetched
	fetched.deserialize('fetched')
	np_file = open('n_page', 'r')
	m = re.search(r'(\d+)[\n]?', np_file.readline())
	np_file.close()
	if m:
		global n_page
		n_page = int(m.group(1))
		print '%d pages exist' % n_page

def sigint_handler(signum, frame):
	"""SIGINT handler: flush persistent state via end_job(), then exit."""
	sys.stdout.write('\n\nCtrl-C Pressed, quitting...\n')
	end_job()
	sys.exit()

def end_job():
	"""Persist crawl state before exit: fetched tids and page counter."""
	err_file.close()
	#write fetched threads' nums
	global fetched
	fetched.serialize('fetched')
	global n_page
	# BUGFIX: the original did `np_file.open('n_page', 'w')` on an
	# undefined name, raising NameError so the counter was never saved.
	with open('n_page', 'w') as np_file:
		np_file.write('%d'%n_page)


if __name__ == "__main__":
	start_job()
	argc = len(sys.argv)
	argv = sys.argv
	if argc==1:
		print "Options:"
		print "\t* fetch"
		print "\t* clean [all|image|torrent|page]"
	elif argc>1:
		if argv[1]=='clean':
			clean()
		elif argv[1]=='fetch':
			try:
				fetch()
			except:
				end_job()
	end_job()
