import re,sys,os,socket
from urllib2 import *
from threading import Thread
from Queue import Queue
import time,gzip,tempfile,string,random
from handler import handler

class e_hentai_handler(handler):
	"""Gallery handler for e-hentai.

	Extracts the per-image page URLs of a gallery and resolves each one to
	the actual image URL before downloading.  Relies on the base ``handler``
	class for ``try_to_read_url``, ``write_log`` and ``download_img``.
	"""
	def __init__(self):
		handler.__init__(self)
		# Session cookies (member id / pass hash / view config) required to
		# view galleries while logged in.
		self.header={'Cookie': 
	       "tips=1; __utmz=185428086.1295703577.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=g-hentai; ipb_member_id=486170; ipb_pass_hash=44121f852838f4237db3e35deb49d91c; lv=1295703660-1295703660; uconfig=tl_m-uh_y-cats_0-ts_m-tr_1-prn_y-dm_l-rx_0-ry_0-sa_y-oi_n-qb_n-tf_n-hp_-hk_; __utma=185428086.1683565909.1295703577.1295703577.1295703577.1; __utmc=185428086; __utmb=185428086.11.10.1295703577"}
	def sub_extract_url_lst(self,url):
		"""Return the image-page URLs listed on one gallery index page."""
		page=self.try_to_read_url(url,header=self.header)
		# Each thumbnail cell is a <div class="gdtm...">...</div> block.
		l=re.findall(r"<div class=\"gdtm.*?</div>",page)
		l=[re.findall(r"href=\"([^\"]*)\"",s)[0] for s in l]
		# Un-escape HTML entities so the URLs are directly fetchable.
		return [s.replace("&amp;","&") for s in l]
	def extract_url_lst(self,url):
		"""Return the image-page URLs for every index page of the gallery.

		Also sets ``self.ext`` to the extension of the first image.
		"""
		page=self.try_to_read_url(url,header=self.header)
		# Pager links look like: <a href="#" onclick="return false">N</a>
		l=re.findall(r"<a href=\"#\" onclick=\"return false\">(\d*)</a>",page)
		# Guard against empty matches; a gallery with no pager links is a
		# single-page gallery (the old max([]) crashed with ValueError here).
		nums=[int(x) for x in l if x]
		n=max(nums) if nums else 1
		if url.find("?")!=-1:url=url[:url.rfind("?")]
		res=[]
		for s in [url+"?p=%d" % i for i in range(n)]:
			# Sleep between consecutive page reads to avoid rate limiting.
			time.sleep(2)
			res.extend(self.sub_extract_url_lst(s))
		if not res:
			# Nothing found: retrying res[0] below would loop forever.
			return res
		# Determine the image extension from the first image page, retrying
		# transient failures.  `except Exception` (not bare `except:`) keeps
		# KeyboardInterrupt/SystemExit able to break the retry loop.
		page=""
		while 1:
			try:
				page=self.try_to_read_url(res[0],header=self.header)
				self.ext=re.findall(r"<img src=\"[^\"]*\.(jpg|png|gif)\" style=",page)[0]
				break
			except Exception:
				self.write_log(page)
				time.sleep(2)
		return res
	def download_img(self,url,file_name):
		"""Resolve the real image URL on the image page *url*, fix up the
		file extension, then delegate saving to handler.download_img.
		"""
		# Keep `page` bound so write_log below is safe even when the very
		# first try_to_read_url call raises (was a NameError before).
		page=""
		while 1:
			try:
				page=self.try_to_read_url(url,header=self.header)
				# Extension may change per image; update self.ext every time.
				self.ext=re.findall(r"<img src=\"[^\"]*\.(jpg|png|gif)\" style=",page)[0]
				file_name=file_name[:file_name.rfind(".")]+"."+self.ext
				url=re.findall(r"<img src=\"([^\"]*\.%s)\" style=" % self.ext, page)[0]
				break
			except Exception:
				self.write_log(page)
				time.sleep(2)
			
		time.sleep(2)
		url=url.replace("&amp;","&")
		handler.download_img(self,url,file_name)

	def get_dir_name(self,dir,url):
		"""Build a Windows-style directory name from the gallery title,
		keeping only ASCII letters from the <title> text.
		"""
		if not dir.endswith("\\"):
			dir+="\\"
		if url.endswith("/"):url=url[:-1]
		page=self.try_to_read_url(url,header=self.header)
		title=re.findall(r"<title>(.*?)</title>",page)[0]
		title=title.replace(" ","_")
		# string.ascii_letters is locale-independent, unlike the old
		# string.letters[:52] (string.letters varies with the locale).
		for c in title:
			if c in string.ascii_letters:
				dir+=c
		dir+="\\"
		return dir
