from imgSize import getImageInfo
import urllib2
from urlparse import urlparse
from proxy import setup_proxy
import re
from common import *

def crawl_mrjh(url):
	"""Fetch the raw image bytes behind an mrjh.org gallery link.

	Rewrites e.g.
		http://www.mrjh.org/gallery.php?entry=images/1325605445.jpg
	into
		http://www.mrjh.org/images/1325605445.jpg
	and downloads it, sending the gallery page as the referer (the
	host refuses direct requests without it).
	"""
	direct_url = url.replace('gallery.php?entry=', '', 1)
	request = urllib2.Request(direct_url)
	request.add_header('referer', url)
	return urllib2.urlopen(request).read()

def crawl_imgiga(url):
	"""Placeholder for an imgiga.com crawler; not implemented yet.

	Always returns None (the corresponding dispatch entry is
	commented out in ImgSpider.known_indirect_hosts).
	"""
	return None

def crawl_imgchili(url):
	"""Fetch the full-size image behind an imgchili.com viewer page.

	Downloads the viewer page, extracts the first image URL of the
	form http://iNN.imgchili.com/....jpg, then fetches it with the
	viewer page as referer (required by the host).
	"""
	html = urllib2.urlopen(url).read()
	# \d+ : subdomains are not single-digit (i2, i40, ...); the
	# original pattern 'i\d.' matched only i0-i9 and left the dots
	# unescaped (any character).
	url2 = re.search(r'http://i\d+\.imgchili\.com/.*?\.jpg', html).group(0)
	req = urllib2.Request(url2)
	req.add_header('referer', url)
	return urllib2.urlopen(req).read()

def crawl_privaus(url):
	"""Fetch an image hosted on privaus.com.

	privaus.com mirrors imgchili.com, so
		http://privaus.com/show/786/786875_s2m-001a.jpg
	is rewritten to
		http://imgchili.com/show/786/786875_s2m-001a.jpg
	and delegated to the imgchili crawler.
	"""
	prefix = 'http://privaus.com/'
	mirror_url = 'http://imgchili.com/' + url[len(prefix):]
	return crawl_imgchili(mirror_url)

def crawl_imagecarry(url):
	"""Fetch the full-size image behind an imagecarry.com page.

	The page embeds the real image on imageporter.com, e.g.
		<img src="http://img80.imageporter.com/i/00786/we78d4cx61yy.jpg"
	Extracts that URL and downloads it directly.
	"""
	html = urllib2.urlopen(url).read()
	# dots in the hostname are escaped; the original pattern left
	# 'imageporter.com' with '.' as a wildcard metacharacter
	url2 = re.search(r"http://.*imageporter\.com/[/0-9a-zA-Z]*\.jpg", html).group(0)
	return urllib2.urlopen(url2).read()

def skip_linker(url):
	"""Follow known link-shortener/interstitial pages to the real URL.

	Pages on tinybucks.net / zff.co / pickbucks embed the target as
		Lbjs.TargetUrl = '<real url>';
	A target can itself be another linker, so keep dereferencing in a
	loop (the original only dereferenced once, with a "need loop"
	TODO). Non-linker URLs are returned unchanged without any network
	access.
	"""
	linker_domains = ('tinybucks.net', 'zff.co', 'pickbucks')
	while any(domain in url for domain in linker_domains):
		html = urllib2.urlopen(url).read()
		marker = html.find("Lbjs.TargetUrl")
		if marker == -1:
			# page layout changed; give up rather than mis-slice
			break
		beg = marker + 18  # skip "Lbjs.TargetUrl = '"
		end = html.find("';", beg)
		next_url = html[beg:end]
		if next_url == url:
			# self-referencing linker; avoid an infinite loop
			break
		url = next_url
	return url

# names of images already fetched this run (shared by all spiders)
visited_images = set()

class ImgSpider:
	"""Downloads images into images/, learning which hosts serve
	images directly and dispatching known indirect hosts to their
	site-specific crawlers."""

	# dispatch table: site netloc -> crawler returning raw image bytes
	known_indirect_hosts = {
		'www.mrjh.org':crawl_mrjh,
		'imagecarry.com':crawl_imagecarry,
		'privaus.com':crawl_privaus,
		#'img001.imgiga.com':crawl_imgiga,
		'imgchili.com':crawl_imgchili
	}

	def __init__(self):
		# Instance attribute: the original used a shared, mutable
		# class attribute, so every instance accumulated into the
		# same list.
		self.direct_hosts = []
		# Strip the trailing newline: the original stored raw lines
		# ('host\n'), so the `site in self.direct_hosts` lookup in
		# crawl_image could never match a bare netloc.
		with open('directImgHosts', 'r') as f:
			for line in f:
				host = line.strip()
				if host:
					self.direct_hosts.append(host)
		setup_proxy()

	def end_job(self):
		# Persist the learned direct hosts, one per line (newlines
		# added here because they are stripped on load).
		with open('directImgHosts', 'w+') as f:
			for host in self.direct_hosts:
				f.write(host + '\n')

	# crawl the image and write it to images/
	def crawl_image(self, url_, name, direct=False):
		"""Fetch one image and write it to images/<name>.

		url_   -- page or image URL (linker pages are dereferenced)
		name   -- output filename; also the dedup key
		direct -- skip host classification and fetch url_ as-is

		Unknown hosts are fetched blindly and validated with
		getImageInfo(); real images promote the host into
		direct_hosts, unrecognized data or thumbnails are logged to
		err_file (from common) and skipped. All errors are swallowed
		and logged so one bad URL cannot abort a crawl.
		"""
		global err_file
		if name in visited_images:
			return
		visited_images.add(name)
		url = skip_linker(url_)
		site = urlparse(url).netloc  # e.g. www.example.com
		try:
			if direct or (site in self.direct_hosts):
				img = urllib2.urlopen(url).read()
			elif site in self.known_indirect_hosts:
				img = self.known_indirect_hosts[site](url)
			else:
				img = urllib2.urlopen(url).read()
				t, w, h = getImageInfo(img)
				if t == '':
					# not a recognized image: probably an HTML page
					# from an indirect host we don't handle yet
					err_file.write('!!!!Unhandled indirect host:\n\t%s\n' % url_)
					return
				elif w < 200 or h < 100:
					# too small to be the full image; likely a thumb
					err_file.write('!!!!Unhandled thumb(%d x %d):\n\t%s\n' % (w, h, url_))
					return
				self.direct_hosts.append(site)
			print('FETCH IMAGE OK:\n\t%s' % url_)
			with open('images/' + name, 'wb+') as out:
				out.write(img)
			print('WRITE IMAGE OK:\n\t%s' % url_)
		except Exception:
			# deliberate best-effort: log and keep crawling
			print("Exception@imgSpider.py:\n\t%s\n" % url_)