#!/usr/bin/python
# Tested with python 2.4 and 2.5

import urllib, urllib2, urlparse, sys, os, socket, re, cookielib, time, math, glob, zipfile, rarfile
from sgmllib import SGMLParser
from optparse import OptionParser

__all__ = ["config", "Configuration", "get_with_retry", "get_files_from_link", "yapget_version", "url_filter", "get_links", "all_url_filter"]

def find_file(fname):
	"""Find a file in a number of default locations.

	Searches the executable's directory, the current directory and the
	user's home directory (in that order) and returns the first existing
	path, or None when the file cannot be found.
	"""
	_dirs = [
		os.path.split(sys.argv[0])[0], # where our executable is
		'.',  # where we were run from
		os.path.expanduser('~'), # user's home (bug fix: a literal '~' is never expanded by the filesystem)
	]
	for file_dir in _dirs:
		file_in_dir = os.path.join(file_dir, fname)
		if os.path.isfile(file_in_dir):
			return file_in_dir
	return None

class StatusReport:
	"""Base class for reporting download progress.

	All methods are no-ops; subclass and override to produce output.
	Also used directly as the "quiet" reporter (see main()).
	"""
	def report_start(self, url):
		"""Called when the download of url begins."""
		pass

	def report_finish(self, fname):
		"""Called when fname has been saved to disk."""
		pass

	def report_progress(self, percent_str, counter, total_len_str, speed_str, eta_str):
		"""Called periodically with pre-formatted progress strings."""
		pass

	def report_completion(self, total_len_str, speed_str, time_str):
		"""Called once when a transfer finishes."""
		pass

	def report_error(self, str):
		"""Called with a human-readable error message."""
		pass

	def report_warning(self, str):
		"""Called with a human-readable warning message."""
		pass

class StatusReportConsole:
	"""Print download progress and error messages to the console.

	Duck-typed to the StatusReport interface (not a subclass).  Progress
	lines are redrawn in place using a leading '\r'.
	"""
	def report_start(self, url):
		"""Announce the URL we are about to fetch."""
		print "Getting: ", url

	def report_finish(self, fname):
		"""Announce the local filename a download was saved to."""
		print "Saved: ", fname

	def report_progress(self, percent_str, counter, total_len_str, speed_str, eta_str):
		# '\r' rewinds to the line start so successive updates overwrite
		# each other; flush so the line shows without a newline.
		sys.stdout.write('\rRetrieving: %5s%% (%8s of %s) at %8s/s ETA %s ' % (percent_str, counter, total_len_str, speed_str, eta_str))
		sys.stdout.flush()

	def report_completion(self, total_len_str, speed_str, time_str):
		# Trailing spaces blank out leftovers of the longer progress line.
		sys.stdout.write('\rGot %s at %8s/s during %s                                \n' % (total_len_str, speed_str, time_str))
		sys.stdout.flush()

	def report_error(self, str):
		"""Errors go to stderr so they survive stdout redirection."""
		sys.stderr.write("Error: %s\n" % str)

	def report_warning(self, str):
		"""Warnings are informational and go to stdout."""
		print str

class Configuration:
	"""Keep track of our configuration received from user.

	A single module-level instance (``config``) is shared by all download
	helpers.  Supports pickling via __getstate__/__setstate__ so a
	front-end can persist settings; the cookie jar, urllib2 opener and
	reporter are rebuilt on restore rather than pickled.
	"""
	def __init__(self):
		self.rs_user = None           # rapidshare premium user name
		self.rs_passwd = None         # rapidshare premium password
		self.cookies_loaded = False   # True once the jar was loaded from disk
		self.cookies = None           # cookielib.LWPCookieJar, created in init_comm()
		self.cookie_file = 'cookies.lwp'
		self.retries = 3              # per-file retry count
		self.report = None            # StatusReport-like progress reporter
		self.dl_dir = '.'             # download target dir (absolute after set_dl_dir)
		self.cookie_dir = '.'
		self.stop_getting = False     # set by a front-end to abort the current download
		self.overwrite_existing = False
		self.tmo = 15.0               # socket timeout in seconds
		self.no_proxy = False
		self.explicit_proxy = None
		self._set_eol()

	def _set_eol(self):
		# Pick the platform's native end-of-line sequence; used when
		# writing the downloading.txt / failed.txt status files.
		if sys.platform.startswith('win') or sys.platform in ('mingw', 'cygwin', 'dos', 'os2'):
			self.eol = '\r\n'
		elif sys.platform in ('mac', 'darwin'):
			self.eol = '\r'
		else:
			self.eol = '\n'

	def initialize(self, rs_user, rs_passwd, retries, report, overwrite, dl_dir, tmo):
		"""Apply the user-supplied options in one call (see main())."""
		self.rs_user = rs_user
		self.rs_passwd = rs_passwd
		self.report = report
		self.overwrite_existing = overwrite
		self.set_retries(retries)
		self.set_dl_dir(dl_dir)
		self.set_tmo(tmo)

	def set_retries(self, retries):
		"""Store the retry count; accepts strings from the option parser."""
		self.retries = int(retries)
		
	def set_dl_dir(self, dl_dir):
		"""Set (and create, if missing) the download directory."""
		if not os.path.isdir(dl_dir):
			os.mkdir(dl_dir)
		self.dl_dir = os.path.abspath(dl_dir)

	def set_tmo(self, tmo):
		# The timeout is applied process-wide to all new sockets.
		self.tmo = float(tmo)
		socket.setdefaulttimeout(self.tmo)

	def init_comm(self, cookie_dir, no_proxy, explicit_proxy = None):
		"""Initialize all urllib2 stuff"""
		self._set_cookiedir(cookie_dir)
		self.no_proxy = no_proxy
		self.explicit_proxy = explicit_proxy

		# explicit proxy > no proxy > environment/registry default.
		if explicit_proxy:
			opener = urllib2.build_opener(urllib2.ProxyHandler({'http':explicit_proxy}), urllib2.HTTPCookieProcessor(self.cookies))
		else:
			if no_proxy:
				# An empty ProxyHandler disables proxy auto-detection.
				opener = urllib2.build_opener(urllib2.ProxyHandler({}), urllib2.HTTPCookieProcessor(self.cookies))
			else:
				opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookies))

		# Internet Explorer 6.0 on XP
		opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)')]
	
		# Now all calls to urllib2.urlopen will use our opener
		urllib2.install_opener(opener)

	def _set_cookiedir(self, cookie_dir):
		# Rebuild the jar from the (possibly new) location; if cookies had
		# already been loaded, force a reload from the new file.
		self.cookies = cookielib.LWPCookieJar(os.path.join(cookie_dir, self.cookie_file))
		self.cookie_dir = cookie_dir
		if self.cookies_loaded:
			self.cookies_loaded = False
			CookiesLoaded()

	def __getstate__(self):
		# Only plain settings are pickled; report/cookies are rebuilt.
		return (self.rs_user, self.rs_passwd, self.cookies_loaded, self.cookie_file, self.retries, self.dl_dir, self.cookie_dir, self.overwrite_existing, self.tmo, self.no_proxy, self.explicit_proxy)

	def __setstate__(self, state):
		# Restore settings, then re-derive everything with side effects
		# (eol, socket timeout, download dir, opener and cookie jar).
		self.rs_user, self.rs_passwd, self.cookies_loaded, self.cookie_file, self.retries, self.dl_dir, self.cookie_dir, self.overwrite_existing, self.tmo, self.no_proxy, self.explicit_proxy = state
		self._set_eol()
		self.set_tmo(self.tmo)
		self.set_dl_dir(self.dl_dir)
		self.init_comm(self.cookie_dir, self.no_proxy, self.explicit_proxy)

config = Configuration()

def CookiesLoaded():
	"""Load cookies from disk if not done yet; return whether they are loaded.

	A missing cookie file is only a warning -- we may authenticate later
	and save a fresh jar.
	"""
	if not config.cookies_loaded:
		try:
			config.cookies.load()
			config.cookies_loaded = True
		except IOError:
			config.report.report_warning("Warning: No cookie file present")
	
	return config.cookies_loaded

class CantResume(Exception):
	"""Raised when a partial download cannot be resumed (no Range support)."""
	pass

class NoRapidaAuth(Exception):
	"""Raised when rapidshare premium authentication failed."""
	pass

class CantDownload(Exception):
	"""Raised when an unrecoverable error occurred while trying to download."""
	pass

def html2ascii(text):
	"""Return text with the basic HTML entities decoded to plain characters."""
	# '&amp;' is decoded last so freshly produced '&' characters are not
	# re-interpreted as the start of another entity.
	for plain, entity in (('"', '&quot;'), ('>', '&gt;'), ('<', '&lt;'), ('&', '&amp;')):
		text = text.replace(entity, plain)
	return text

class URLLister(SGMLParser):
	"""Collect anchor, image and script URLs while parsing an HTML page."""
	def reset(self):
		SGMLParser.reset(self)
		self.urls = []
		self.image_urls = []
		self.script_urls = []

	def _collect(self, attrs, wanted, bucket):
		# Append every value of attribute 'wanted' found on the tag.
		for key, value in attrs:
			if key == wanted:
				bucket.append(value)

	def start_a(self, attrs):
		self._collect(attrs, 'href', self.urls)

	def start_img(self, attrs):
		self._collect(attrs, 'src', self.image_urls)

	def start_script(self, attrs):
		self._collect(attrs, 'src', self.script_urls)

def get_links(url, filter = ''):
	"""Fetch url and return its anchor and image links matching the regex filter."""
	parser = URLLister()
	parser.feed(urllib2.urlopen(url).read())
	pattern = re.compile(filter)
	# Anchors first, then images; script URLs are deliberately ignored.
	result = []
	for link in parser.urls + parser.image_urls:
		if pattern.search(link):
			result.append(html2ascii(link))
	parser.close()
	return result

def open_unique_file(fname, index = 0):
	"""Open a file for writing inside config.dl_dir, returning (file, path).

	When config.overwrite_existing is set, fname is used as-is; otherwise
	"_1", "_2", ... is appended (recursively) until a free name is found.
	"""
	if index != 0:
		local_file = os.path.join(config.dl_dir, "%s_%d" % (fname, index))
	else:
		local_file = os.path.join(config.dl_dir, fname)
	
	# Idiom fix: use the open() builtin instead of the deprecated file().
	if config.overwrite_existing:
		return open(local_file, 'wb'), local_file
	
	if os.path.exists(local_file):
		return open_unique_file(fname, index + 1)
	
	return open(local_file, 'wb'), local_file

def creat_unique_directory(directory, index = 0):
	"""Create a directory that is unique if another one with the same name exists"""
	name = directory
	if index != 0:
		name = "%s_%d" % (directory, index)
	local_dir = os.path.join(config.dl_dir, name)

	# When overwriting, reuse the directory if it is already there.
	if config.overwrite_existing:
		if not os.path.isdir(local_dir):
			os.mkdir(local_dir)
		return local_dir

	# Otherwise recurse with the next suffix until a free name is found.
	if os.path.exists(local_dir):
		return creat_unique_directory(directory, index + 1)

	os.mkdir(local_dir)
	return local_dir

# Tuning constants for the adaptive download block size.
const_1k = 1024
const_initial_block_size = 10 * const_1k  # size of the very first read
const_epsilon = 0.0001  # smallest meaningful time delta; guards divide-by-zero

def new_block_size(before, after, bytes):
	"""Calculate new block size based on previous block size.

	Aims the next read at roughly one second of transfer: the measured
	rate, clamped between half and double the bytes just read.
	"""
	lower = max(bytes / 2.0, 1.0)
	upper = max(bytes * 2.0, 1.0)
	elapsed = after - before
	if elapsed < const_epsilon:
		# Read was effectively instantaneous -- grow as fast as allowed.
		return int(upper)
	rate = bytes / elapsed
	if rate > upper:
		return int(upper)
	if rate < lower:
		return int(lower)
	return int(rate)

def optimum_k_exp(num_bytes):
	"""Return the power of 1024 best suited to represent num_bytes (0 for 0)."""
	# Fixes: 'global' is only needed for assignment, not reading; int()
	# replaces long() -- identical result, py2 promotes automatically.
	if num_bytes == 0:
		return 0
	return int(math.log(num_bytes, const_1k))

def format_bytes(num_bytes):
	"""Return num_bytes as a short human-readable string, e.g. '1.50M'.

	Uses optimum_k_exp() to pick the unit; plain bytes are shown without a
	decimal part.  Exits the program on values beyond the suffix table.
	"""
	# Fix: dropped the needless 'global const_1k' -- reading a module-level
	# name requires no global declaration.
	try:
		exp = optimum_k_exp(num_bytes)
		suffix = 'bkMGTPEZY'[exp]
		if exp == 0:
			return '%s%s' % (num_bytes, suffix)
		converted = float(num_bytes) / float(const_1k**exp)
		return '%.2f%s' % (converted, suffix)
	except IndexError:
		sys.exit('Error: internal error formatting number of bytes.')

def format_time(start, end):
	"""Format time difference and return it in string format as MM:SS.

	Spans of 100 minutes or more are rendered as '--:--'.
	"""
	elapsed = end - start
	minutes = elapsed / 60
	seconds = elapsed % 60
	if minutes > 99:
		return '--:--'
	return '%02d:%02d' % (minutes, seconds)

def calc_eta(start, now, total, current):
	"""Calculate ETA and return it in string format as MM:SS.

	Returns '--:--' when nothing has arrived yet, when no measurable time
	has passed, or when the ETA exceeds 99 minutes.
	"""
	dif = now - start
	if current == 0 or dif < const_epsilon:
		return '--:--'
	rate = float(current) / dif
	# int() replaces long(): identical result, py2 promotes automatically.
	eta = int((total - current) / rate)
	eta_mins = eta / 60
	eta_secs = eta % 60
	if eta_mins > 99:
		return '--:--'
	return '%02d:%02d' % (eta_mins, eta_secs)

def calc_speed(start, now, bytes):
	"""Calculate speed and return it in string format"""
	elapsed = now - start
	# Speed is undefined before any data arrived or any time passed.
	if bytes == 0 or elapsed < const_epsilon:
		return 'N/A b'
	return format_bytes(float(bytes) / elapsed)

def download_file(dst_file, response):
	"""Download data from an HTTP stream and save it to a file.

	Reports progress through config.report, adapts the read block size to
	the measured transfer rate and honours config.stop_getting.  Returns
	the number of bytes written.
	"""
	byte_counter = 0
	block_size = const_initial_block_size
	start_time = time.time()
	# Bug fix: 'after' is used for the final statistics, but was unbound
	# when the loop exited before the first read completed (abort flag or
	# socket error on the first iteration) -> NameError.
	after = start_time
	try:
		total_len = long(response.headers.dict["content-length"])
		total_len_str = format_bytes(total_len)
	except KeyError:
		# No Content-Length header: percentage and ETA are unavailable.
		total_len = None
		total_len_str = 'N/A'
	
	while True:
		if total_len is not None:
			percent = float(byte_counter) / float(total_len) * 100.0
			percent_str = '%.1f' % percent
			eta_str = calc_eta(start_time, time.time(), total_len, byte_counter)
		else:
			percent_str = '---.-'
			eta_str = '--:--'
		
		counter = format_bytes(byte_counter)
		speed_str = calc_speed(start_time, time.time(), byte_counter)
		config.report.report_progress(percent_str, counter, total_len_str, speed_str, eta_str)
		
		if config.stop_getting:
			break
		before = time.time()
		try:
			data = response.read(block_size)
		except socket.error:
			# Treat a network error as end-of-stream; the caller retries.
			break
		after = time.time()
		dl_bytes = len(data)
		if dl_bytes == 0:
			break
		byte_counter += dl_bytes
		dst_file.write(data)
		# Adapt the next read to the rate we just measured.
		block_size = new_block_size(before, after, dl_bytes)

	speed_str = calc_speed(start_time, after, byte_counter)
	time_str = format_time(start_time, after)
	if not config.stop_getting:
		config.report.report_completion(total_len_str, speed_str, time_str)
	return byte_counter

def download_file_with_retries(fname, remote_url, is_post = False):
	"""Download a file and perform retries if necessary, also closes the connection.

	When the server sent a Content-Length, short reads are resumed with
	HTTP Range requests until the whole file has arrived.  Raises
	CantResume (after removing the partial file) when the server does not
	honour Range requests.
	"""
	if is_post:
		# An empty POST body; some hosters require a POST to start the download.
		response = urllib2.urlopen(remote_url, '')
	else:
		response = urllib2.urlopen(remote_url)
	
	dst_file, filename = open_unique_file(fname)
	try:
		if config.stop_getting:
			config.stop_getting = False
			return
		
		try:
			total_size = long(response.headers.dict["content-length"])
		except KeyError:
			# Without a known size we cannot verify completeness --
			# fall back to a single best-effort download.
			config.report.report_warning("Could not get content size")
			download_file(dst_file, response)
			return
		
		got = 0
		while got < total_size:
			if config.stop_getting:
				config.stop_getting = False
				return
			if got != 0:
				# Short read: ask the server to continue where we stopped.
				config.report.report_warning("Retrying %s from %d" % (filename, got))
				req = urllib2.Request(response.geturl())
				req.add_header("Range", "bytes=%d-" % got)
				response = urllib2.urlopen(req)
				if response.code != 206: # server does not support partial requests
					dst_file.close()
					os.unlink(filename)
					raise CantResume("Server does not support resumes")
			got += download_file(dst_file, response)
	finally:
		if not dst_file.closed:
			dst_file.close()

def get_img_url(url, filter):
	"""Return the first image URL on the page that contains the substring filter.

	Returns an empty string when no image matches.
	"""
	parser = URLLister()
	parser.feed(urllib2.urlopen(url).read())
	found = ''
	for img_url in parser.image_urls:
		if img_url.find(filter) != -1:
			found = img_url
			break
	parser.close()
	return found

def get_filename_from_url(url):
	"""Get the filename part from the URL.

	Prefers the last '='-separated token of the query string, falling back
	to the last path component; a '.html' suffix is stripped either way.
	"""
	parts = urlparse.urlparse(url)
	query = parts[4]
	if query != '':
		local_file = query.split('=')[-1]
	else:
		local_file = parts[2].split('/')[-1]
	suffix_at = local_file.find('.html')
	if suffix_at != -1:
		local_file = local_file[:suffix_at]
	return local_file

def get_regular(url):
	"""Download a directly-linked file, naming it after the URL."""
	fname = get_filename_from_url(url)
	download_file_with_retries(fname, url)
	config.report.report_finish(fname)

def cookie_present(site):
	"""Return True when the shared cookie jar has a cookie whose domain contains site."""
	# Idiom fix: the enumerate() index in the original was never used.
	for cookie in config.cookies:
		if cookie.domain.find(site) != -1:
			return True
	return False

def auth_rapidshare():
	"""Authenticate with rapidshare.com, so we can download freely afterwards.

	Posts the premium credentials once and keeps the session cookie in the
	jar; subsequent calls are no-ops while the cookie is present.  Raises
	NoRapidaAuth on missing credentials or a rejected login.
	"""
	CookiesLoaded()
	if not cookie_present('rapidshare.com'):
		if (not config.rs_user) or (not config.rs_passwd):
			raise NoRapidaAuth("Need both rapidshare username and password to D/L from rapidshare")
		
		post_data = urllib.urlencode({'login': config.rs_user, 'password': config.rs_passwd})
		url = 'https://ssl.rapidshare.com/cgi-bin/premiumzone.cgi'
		# The result page flags login failures with E#1/E#2 markers.
		for line in urllib2.urlopen(url, post_data).read().splitlines():
			if line.find('E#1') != -1:
				raise NoRapidaAuth("Premium-Account not found at RapidShare.com")
			elif line.find('E#2') != -1:
				raise NoRapidaAuth("Account found, but password is incorrect")

		config.cookies.save()
		config.cookies_loaded = True
		if not cookie_present('rapidshare.com'):
			# Fix: corrected "rapdishare" typo in the error message.
			raise NoRapidaAuth("Unknown error while authenticating rapidshare premium account")

def get_from_rapidshare(url):
	"""Download a file from rapidshare, authenticating first if needed."""
	auth_rapidshare()
	
	local_file = get_filename_from_url(url)
	download_file_with_retries(local_file, url)
	config.report.report_finish(local_file)

def get_from_jpghosting(url):
	"""Get a single file from jpghosting.

	Viewer-page URLs are first resolved to the fileserver image URL by
	scanning the page's <img> tags for the name from the query string.
	"""
	parsed_url = urlparse.urlparse(url)
	if parsed_url[1].find('fileserver') == -1: # this is not a direct URL
		url_to_search = parsed_url[4].split('image=')[1]
		url = get_img_url(url, url_to_search)
		parsed_url = urlparse.urlparse(url)

	# The local name is everything after 'images/' in the fileserver path.
	local_file = parsed_url[2].split('images/')[1]
	download_file_with_retries(local_file, url)
	config.report.report_finish(local_file)

def cleanup_filename(fname):
	"""Clean up filenames from imagevenue.

	Drops the first and last '_'-separated tokens, re-joins the middle
	with '_' and appends a '.jpg' extension.  (With exactly two tokens the
	second one is kept.)
	"""
	parts = fname.split('_')
	kept = [parts[1]] + parts[2:-1]
	return '_'.join(kept) + '.jpg'

def get_from_imagevenue(url):
	"""Get a single file from imagevenue.

	Full-size viewer URLs carry an 'image=' query string; URLs without one
	are assumed to be thumbnails and skipped.
	"""
	parsed_url = urlparse.urlparse(url)
	if parsed_url[4] == '':
		config.report.report_warning("Image probably is a thumbnail, aborting download")
		return
	
	src_pat = 'SRC="'
	remote_file = None
	# Find the line with our file location
	for line in urllib2.urlopen(url).read().splitlines():
		if line.find(src_pat) != -1:
			# The SRC path is relative to the viewer page.
			remote_file = '/' + line.split(src_pat)[1].split('"')[0]
			break
		elif line.find('This image does not exist on this server') != -1:
			config.report.report_warning('Image does not exist at URL "%s"' % url)
			return
	
	if not remote_file:
		raise CantDownload('Error occured while trying to download')
	
	local_file = cleanup_filename(parsed_url[4].split('image=')[1])
	download_file_with_retries(local_file, urlparse.urljoin(url, remote_file))
	config.report.report_finish(local_file)

def get_from_hotlinkimage(url):
	"""Get a single file from hotlinkimage.

	Two-step scrape: find the showimg.php script URL on the viewer page,
	then pull the real image URL out of that script's output.
	"""
	parsed_url = urlparse.urlparse(url)
	if parsed_url[2].find('thumb') != -1:
		config.report.report_warning("Image probably is a thumbnail, aborting download")
		return
	
	parser = URLLister()
	parser.feed(urllib2.urlopen(url).read())
	for script_url in parser.script_urls:
		if script_url.find('hotlinkimage.com/showimg.php') != -1:
			url = script_url
			break
	parser.close()
	
	src_pat = 'src="'
	# Find the line with our file location
	for line in urllib2.urlopen(url).read().splitlines():
		if line.find(src_pat) != -1:
			url = line.split(src_pat)[1].split('"')[0]
			break
	
	local_file = get_filename_from_url(url)
	download_file_with_retries(local_file, url)
	config.report.report_finish(local_file)

def get_from_imagebeaver(url):
	"""Get a single file from imagebeaver.

	The real image is the first <img> on the page served from
	www1.imagebeaver.com.
	"""
	url = get_img_url(url, 'www1.imagebeaver.com')
	
	local_file = get_filename_from_url(url)
	download_file_with_retries(local_file, url)
	config.report.report_finish(local_file)

def get_from_imagesocket(url):
	"""Get a single file from imagesocket.

	The real image is the first <img> on the page served from
	content.imagesocket.com/images/.
	"""
	url = get_img_url(url, 'content.imagesocket.com/images/')
	
	local_file = get_filename_from_url(url)
	download_file_with_retries(local_file, url)
	config.report.report_finish(local_file)

def get_from_uploadedto(url):
	"""Get a single file from www.uploaded.to.

	Scrapes the download form's POST target and the filename from the
	page title.  When the site reports the free-download hourly block, we
	sleep 65 minutes (abortable via config.stop_getting) and try exactly
	once more before giving up.
	"""
	post_pat = 'post" action="'
	titl_pat = '<title>'
	local_file = None
	remote_url = None
	first_try = True
	while True:
		data = urllib2.urlopen(url).read()
		account_blocked = False
		# Find the line with our file location
		for line in data.splitlines():
			if line.find(post_pat) != -1:
				remote_url = line.split(post_pat)[1].split('"')[0]
				break
			elif line.find(titl_pat) != -1:
				# Filename comes from the page title, before the ' ...'.
				local_file = line.split(' ...')[0].split(titl_pat)[1].strip()
			elif line.find('Or wait 1 hour') != -1:
				account_blocked = True
				break
		if account_blocked:
			if first_try:
				config.report.report_warning('uploaded.to blocked us for an hour')
				# Sleep in 1s steps so a front-end abort stays responsive.
				for x in xrange(65 * 60):
					if config.stop_getting:
						config.stop_getting = False
						return
					time.sleep(1)
			else:
				raise CantDownload("Unable to download from uploaded.to even after 1 hour sleep")
		else:
			break
		first_try = False
	
	if not remote_url or not local_file:
		raise CantDownload('Error occured while trying to download')
	
	# The download is triggered with an (empty) POST request.
	download_file_with_retries(local_file, remote_url, True)
	config.report.report_finish(local_file)

def get_from_usercash(url):
	"""Resolve a usercash redirector link and download its target.

	The real destination URL is embedded in the page's <TITLE> tag; the
	target is then handed back to get_with_retry() for dispatch.
	"""
	titl_pat = '<TITLE>'
	remote_url = None
	for line in urllib2.urlopen(url).read().splitlines():
		if line.find(titl_pat) != -1:
			remote_url = line.split(titl_pat)[1].split('</TITLE>')[0]
			break
	
	if not remote_url:
		raise CantDownload('Error occured while trying to download')
	get_with_retry(remote_url)

def gen_url(url, number):
	"""Generate url for jimslip based on start url and number.

	Returns a (page_url, filename) pair: the page URL points at image
	'number' in large quality, the filename embeds the gallery name and a
	zero-padded index.
	"""
	parsed_url = urlparse.urlparse(url)
	base = parsed_url[2].split('q/large/')[0].split('/p/')[0]
	new_path = base + '/p/' + str(number) + '/q/large/index.html'
	fname = '%s_%03d.jpg' % (parsed_url[2].split('/')[2], number)
	return (parsed_url[0] + '://' + parsed_url[1] + new_path, fname)

def get_from_jimslip(url):
	"""Get all the images from jimslip.com.

	Walks gallery pages 1, 2, ... downloading the first image on each page
	into a subdirectory named after the gallery.  Runs until an error
	(typically HTTP 404 past the last page) propagates to the caller.
	"""
	parsed_url = urlparse.urlparse(url)
	directory = parsed_url[2].split('/')[2]
	try: os.mkdir(os.path.join(config.dl_dir, directory))
	except: pass

	curr = 1
	while True:
		curr_url, local_file = gen_url(url, curr)
		parser = URLLister()
		parser.feed(urllib2.urlopen(curr_url).read())
		
		# First image on the page is the photo itself.
		my_url = None
		for img_url in parser.image_urls:
			my_url = img_url
			break
	
		parser.close()
		curr += 1
		if my_url is None:
			# Robustness fix: a page without images left my_url unbound.
			continue
		# Bug fix: the original joined the remote URL into the destination
		# path and called download_file_with_retries() with one argument,
		# which always raised TypeError.
		download_file_with_retries(os.path.join(directory, local_file), my_url)
		config.report.report_finish(local_file)

def get_valid_name(name):
	"""Get valid file/directory name.

	'&' becomes 'and'; characters that are illegal in Windows filenames
	are dropped.
	"""
	name = name.replace('&', 'and')
	for bad in (':', '|', '/', '\\', '<', '>', '"', '?', '*'):
		name = name.replace(bad, '')
	return name

def unzip_file(fname):
	"""Unzip a single file and delete the original zip afterwards.

	Members are flattened into the archive's directory (their internal
	paths are discarded).  A corrupt archive is left untouched.
	"""
	directory, zip_name = os.path.split(fname)
	if directory == '':
		directory = '.'
	
	arch_file = zipfile.ZipFile(fname, 'r')
	try:
		if arch_file.testzip(): # either the archive is corrupt, or we can't read it...
			return
		for name in arch_file.namelist():
			# Fixes: open() instead of the deprecated file() builtin, and
			# explicit close of every handle (the archive handle used to
			# leak when the archive was corrupt).
			out_file = open(os.path.join(directory, os.path.split(name)[1]), 'wb')
			try:
				out_file.write(arch_file.read(name))
			finally:
				out_file.close()
	finally:
		arch_file.close()
	os.unlink(fname)

def unrar_file(fname):
	"""Unrar a single file and delete the original rar afterwards"""
	directory = os.path.split(fname)[0]
	if directory == '':
		directory = '.'
	
	arch_file = rarfile.RarFile(fname, 'r')
	# Members are flattened into the archive's directory.
	for member in arch_file.namelist():
		target = os.path.join(directory, os.path.split(member)[1])
		file(target, 'wb').write(arch_file.read(member))
	arch_file.close()
	os.unlink(fname)

def extract_file(fname):
	"""Extract a single zip or rar archive; other extensions are ignored.

	Extraction errors are deliberately swallowed -- unpacking is a
	best-effort convenience on top of the completed download.
	"""
	# Bug fix: os.path.splitext() returns a (root, ext) tuple, so the
	# original's .lower() on it raised AttributeError; the extension also
	# keeps its leading dot, so 'zip'/'rar' could never match.
	extension = os.path.splitext(fname)[1].lower()
	try:
		if extension == '.zip':
			unzip_file(fname)
		elif extension == '.rar':
			unrar_file(fname)
	except: pass

def extract_all(fname):
	"""Extract the file and if it contains more archives in it, extract those as well"""
	extract_file(fname)
	directory = os.path.split(fname)[0]
	if directory == '':
		directory = '.'
	# One extra level: archives unpacked alongside fname get extracted too
	# (all zips first, then all rars).
	for pattern in ('*.zip', '*.rar'):
		for nested in glob.glob(os.path.join(directory, pattern)):
			extract_file(nested)

def get_from_flazx(url):
	"""Get a book from www.flazx.com.

	Multi-stage scrape: find the www.flazx.info script link, the book
	title and release year on the ebook page; create a "<title> (<year>)"
	directory; follow two more pages to the real archive URL; download it
	there and extract any zip/rar archives.
	"""
	data = urllib2.urlopen(url).read()
	parser = URLLister()
	parser.feed(data)
	for script_url in parser.script_urls:
		if script_url.find('www.flazx.info') != -1:
			url = script_url
			break
	parser.close()
	get_next_line = False
	book_name = 'Unknown'
	year = 'None'
	for line in data.splitlines():
		if line.find('<title>') != -1:
			book_name = line.split('<title>FlazX - ')[1].split('<')[0].strip()
		if get_next_line:
			# The year is on the line after the 'Release Date' marker.
			year = line.split('<')[0].split()[2]
			break
		if line.find('Release Date') != -1:
			get_next_line = True
	del data
	book_name = get_valid_name(book_name)
	mydir = '/' + book_name + ' (' + year + ')'
	try: os.mkdir(config.dl_dir + mydir)
	except: pass
	
	# First hop: the download page linked from the script URL.
	for line in urllib2.urlopen(url).read().splitlines():
		if line.find('href') != -1:
			url = line.split("href='")[1].split("'")[0]
			break
	
	# Second hop: the last flazx.info anchor is the archive itself.
	parser.feed(urllib2.urlopen(url).read())
	for src_url in parser.urls:
		if src_url.find('www.flazx.info') != -1:
			url = src_url
	parser.close()
	local_file = get_filename_from_url(url)
	file_to_save = os.path.join(config.dl_dir + mydir, local_file)
	download_file_with_retries(file_to_save, url)
	extract_all(file_to_save)
	config.report.report_finish(local_file)

# Regex alternation of all hosters we can download files from; used to pick
# supported links out of an arbitrary page.
url_filter = 'imagevenue.com|uploaded.to|jpghosting.com|imagetoker.com|www.jigsawshare.com|usercash.com|rapidshare.com|hotlinkimage.com|jimslip.com|www.flazx.com|www.beautyandthesenior.com|www.imagebeaver.com|imagesocket.com'
# pornix pages are link collections rather than files, so they are only
# accepted at the top level, not when scanning a collection page.
all_url_filter = url_filter + "|www.pornix.eu"
def get_files_from_link(url):
	"""Get all the files we support from a URL.

	Writes the pending link list to downloading.txt (current directory) so
	an interrupted session can be inspected, downloads every link, then
	removes the status file.
	"""
	links = get_links(url, all_url_filter)
	stat_file = file("downloading.txt", "w")
	for l in links:
		stat_file.write(l + config.eol)
	stat_file.close()
	
	for l in links:
		get_with_retry(l)
	os.unlink("downloading.txt")

def get_from_porix(url):
	"""Get files from porix.

	Downloads every supported link on the page into a fresh subdirectory
	named after the page, temporarily redirecting config.dl_dir there; the
	original download directory is always restored.
	"""
	links = get_links(url, url_filter)
	new_dir = urlparse.urlparse(url)[2].split(".html")[0].split('/')[-1]
	
	created_dir = creat_unique_directory(new_dir)
	stat_file = file(os.path.join(created_dir, 'downloading.txt'), 'w')
	curr_dl_dir = config.dl_dir
	try:
		config.dl_dir = created_dir
		for l in links:
			stat_file.write(l + config.eol)
	
		for l in links:
			get_with_retry(l)
	finally:
		stat_file.close()
		os.unlink(os.path.join(created_dir, 'downloading.txt'))
		config.dl_dir = curr_dl_dir

# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.  Used by get_with_retry() to
# pretty-print HTTPError codes.
http_responses = {
	100: ('Continue', 'Request received, please continue'),
	101: ('Switching Protocols',
		'Switching to new protocol; obey Upgrade header'),

	200: ('OK', 'Request fulfilled, document follows'),
	201: ('Created', 'Document created, URL follows'),
	202: ('Accepted',
		'Request accepted, processing continues off-line'),
	203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
	204: ('No Content', 'Request fulfilled, nothing follows'),
	205: ('Reset Content', 'Clear input form for further input.'),
	206: ('Partial Content', 'Partial content follows.'),

	300: ('Multiple Choices',
		'Object has several resources -- see URI list'),
	301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
	302: ('Found', 'Object moved temporarily -- see URI list'),
	303: ('See Other', 'Object moved -- see Method and URL list'),
	304: ('Not Modified',
		'Document has not changed since given time'),
	305: ('Use Proxy',
		'You must use proxy specified in Location to access this resource.'),
	307: ('Temporary Redirect',
		'Object moved temporarily -- see URI list'),

	400: ('Bad Request',
		'Bad request syntax or unsupported method'),
	401: ('Unauthorized',
		'No permission -- see authorization schemes'),
	402: ('Payment Required',
		'No payment -- see charging schemes'),
	403: ('Forbidden',
		'Request forbidden -- authorization will not help'),
	404: ('Not Found', 'Nothing matches the given URI'),
	405: ('Method Not Allowed',
		'Specified method is invalid for this server.'),
	406: ('Not Acceptable', 'URI not available in preferred format.'),
	407: ('Proxy Authentication Required', 'You must authenticate with this proxy before proceeding.'),
	408: ('Request Timeout', 'Request timed out; try again later.'),
	409: ('Conflict', 'Request conflict.'),
	410: ('Gone',
		'URI no longer exists and has been permanently removed.'),
	411: ('Length Required', 'Client must specify Content-Length.'),
	412: ('Precondition Failed', 'Precondition in headers is false.'),
	413: ('Request Entity Too Large', 'Entity is too large.'),
	414: ('Request-URI Too Long', 'URI is too long.'),
	415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
	416: ('Requested Range Not Satisfiable',
		'Cannot satisfy request range.'),
	417: ('Expectation Failed',
		'Expect condition could not be satisfied.'),

	500: ('Internal Server Error', 'Server got itself in trouble'),
	501: ('Not Implemented', 'Server does not support this operation'),
	502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
	503: ('Service Unavailable',
		'The server cannot process the request due to a high load'),
	504: ('Gateway Timeout',
		'The gateway server did not receive a timely response'),
	505: ('HTTP Version Not Supported', 'Cannot fulfill request.')}

# Dispatch table mapping a URL substring to its download handler; the first
# matching entry wins (see get_with_retry), so order matters.
supported_urls = [('imagevenue.com', get_from_imagevenue),
		('uploaded.to', get_from_uploadedto),
		('jpghosting.com', get_from_jpghosting),
		('imagetoker.com', get_regular),
		('www.jigsawshare.com', get_regular),
		('usercash.com', get_from_usercash),
		('www.pornix.eu', get_from_porix),
		('//rapidshare.com', get_from_rapidshare),
		('www.rapidshare.com', get_from_rapidshare),
		('hotlinkimage.com', get_from_hotlinkimage),
		('jimslip.com', get_from_jimslip),
		('www.beautyandthesenior.com', get_from_jimslip),
		('www.flazx.com/ebook', get_from_flazx),
		('www.imagebeaver.com/view.php', get_from_imagebeaver),
		('//imagesocket.com', get_from_imagesocket)]

def get_with_retry(url, fname = None):
	"""Attempt to get a URL. Do retries if necessary.

	Dispatches to the first matching handler in supported_urls.  Network
	errors and failed resumes are retried up to config.retries times;
	everything else aborts immediately.  URLs that could not be fetched
	are appended to failed.txt (or <fname>_failed.txt).
	"""
	getter_func = None
	for site, func in supported_urls:
		if url.find(site) != -1:
			getter_func = func
			break
	
	if getter_func == None:
		config.report.report_error("Unsupported URL provided: %s" % url)
		return
	
	config.report.report_start(url)
	curr = 0
	while curr < config.retries:
		try:
			getter_func(url)
			return
		except socket.error, msg:
			# Transient network problem -- back off briefly and retry.
			config.report.report_error(msg)
			curr = curr + 1
			time.sleep(1)
			continue
		except CantDownload, msg:
			config.report.report_error(msg)
			break
		except CantResume, msg:
			# The partial file was already removed; start over.
			config.report.report_error(msg)
			curr = curr + 1
			continue
		except urllib2.HTTPError, msg:
			config.report.report_error("%d: %s" % (msg.code, http_responses[msg.code][0]))
			break
		except urllib2.URLError, msg:
			config.report.report_error(msg.reason)
			break
		except NoRapidaAuth, msg:
			config.report.report_error(msg)
			break
	
	# All retries exhausted (or fatal error): record the failed URL.
	if fname == None:
		fname = 'failed.txt'
	else:
		fname = '%s_failed.txt' % fname
	
	fd = file(fname, 'a')
	fd.write(url)
	fd.write(config.eol)
	fd.close()
	config.report.report_error("Unable to get: %s" % url)

yapget_version = "1.1"

def main():
	"""Application main.

	Parses the command line, configures the shared Configuration instance
	and dispatches to one of the three modes: download links scraped from
	a page (-s), links listed in a file (-i), or a single URL (-u).
	Returns a process exit code (0 on success).
	"""
	usage="usage: %prog [options]\nDownload files from:\n\t" \
		+ all_url_filter.replace('|', '\n\t') \
		+ "\nUse proxy if http_proxy is set (must be 'http://host:port', " \
		"\nbut can also have user/passwd) or registry on Windows\n" \
		"Note that rapidshare authentication over proxy is not supported (but D/L is)\n" \
		"If some of the downloads fail, a text file with failed urls will be created"
	parser = OptionParser(usage=usage, version="%prog " + yapget_version)
	parser.add_option("-s", "--source-url", dest="source_url",
		help="get links to download from a URL", metavar="URL")
	parser.add_option("-i", "--input-file", dest="fname",
		help="get links to download from a file", metavar="FILE")
	parser.add_option("-u", "--url", dest="url_name",
		help="get a single url", metavar="URL")
	parser.add_option("-l", "--rs-login", dest="rs_user",
		help="rapidshare username for authentication " \
		+ "(only needed the first time you download from rapidshare)",
		metavar="USERNAME")
	parser.add_option("-p", "--rs-password", dest="rs_passwd",
		help="rapidshare password for authentication " \
		"(only needed the first time you download from rapidshare)",
		metavar="PASSWORD")
	parser.add_option("-n", "--no-proxy", action="store_true", dest="no_proxy",
		help="do not use proxy even if such information is present",
		default=False)
	parser.add_option("-t", "--timeout", dest="tmo",
		help="timeout value for network operations (default 15)",
		default=15, metavar="SECONDS")
	parser.add_option("-r", "--retries", dest="retries",
		help="how many times to retry before giving up on a file (default 3)",
		default=3)
	parser.add_option("-c", "--cookie-dir", dest="cookie_dir",
		help="where to get/save cookie file (default '.')",
		default='.', metavar="DIRECTORY")
	parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
		help="do not print anything to the console",
		default=False)
	parser.add_option("-d", "--dl-dir", dest="dl_dir",
		help="which directory to download files to (will be created if does not exist) (default '.')",
		default='.', metavar="DIRECTORY")
	parser.add_option("-f", "--force-overwrite", action="store_true", dest="overwrite",
		help="overwrite file if exists (default is to create a unique filename/directory)",
		default=False)
	parser.add_option("-P", "--explicit-proxy", dest="explicit_proxy",
		help="specify explicitely a proxy - must be in format of http_proxy",
		metavar='PROXY')
	(options, args) = parser.parse_args()

	# Resolve the input file before set_dl_dir() may change directories.
	if options.fname:
		options.fname = os.path.abspath(options.fname)
	if options.quiet:
		# The base class is a do-nothing reporter.
		reporter = StatusReport()
	else:
		reporter = StatusReportConsole()
	config.initialize(options.rs_user, options.rs_passwd, options.retries, reporter, options.overwrite, options.dl_dir, options.tmo)
	
	config.init_comm(options.cookie_dir, options.no_proxy, options.explicit_proxy)
	try:
		if options.fname:
			for line in file(options.fname, 'r').readlines():
				line = line.strip(' \r\n\t')
				# Skip blanks/comments: only http URLs are downloaded.
				if urlparse.urlparse(line)[0] == 'http':
					get_with_retry(line, options.fname)
		elif options.url_name:
			get_with_retry(options.url_name)
		elif options.source_url:
			get_files_from_link(options.source_url)
		else:
			parser.print_help()
			return 1
	except ValueError, msg:
		config.report.report_error("Invalid argument: %s" % msg)
		return 1
	return 0

if __name__ == "__main__":
	# Script entry point: translate Ctrl-C into a clean exit with code 0.
	ret = 0
	try:
		ret = main()
	except KeyboardInterrupt:
		print "Quiting"
	sys.exit(ret)

