#!/usr/bin/python
# Tested with python 2.4 and 2.5

import urllib, urllib2, urlparse, sys, os, socket, re, cookielib, time, math, glob, zipfile, rarfile, imp
from sgmllib import SGMLParser
from optparse import OptionParser

__all__ = ["config", "plugins", "get_with_retry", "is_valid_proxy_url",
	"get_files_from_link", "yapget_version", "get_links", "find_file"]

def is_valid_proxy_url(url):
	"""Return True when *url* looks like an http_proxy-style proxy URL.

	The URL must use the http scheme, have a network location, and that
	location must contain a host part (after any user:password@ prefix).
	"""
	scheme, netloc = urlparse.urlparse(url)[:2]
	if scheme != 'http':
		return False
	if not netloc:
		return False
	# splituser -> (user, host); a proxy URL without a host is useless.
	return bool(urllib.splituser(netloc)[1])

def find_file(fname):
	"""Find *fname* in the default locations and return its absolute path.

	Looks next to the executable first, then in the user's home
	directory.  When the file does not exist yet, returns the path where
	it should be created: next to the executable on Windows, in the home
	directory elsewhere.
	"""
	_dirs = [
		os.path.split(sys.argv[0])[0], # where our executable is
		os.path.expanduser('~'), # user's home
	]
	for file_dir in _dirs:
		file_in_dir = os.path.join(file_dir, fname)
		# Bitwise-OR the access flags; the old `os.W_OK or os.R_OK`
		# short-circuited to just os.W_OK, so readability was never checked.
		if os.path.isfile(file_in_dir) and os.access(file_dir, os.W_OK | os.R_OK):
			return os.path.abspath(file_in_dir)
	if sys.platform.startswith('win') and os.access(_dirs[0], os.W_OK | os.R_OK):
		return os.path.abspath(os.path.join(_dirs[0], fname))
	else:
		return os.path.abspath(os.path.join(_dirs[1], fname))

class StatusReport:
	"""No-op progress reporter; subclass and override methods for output."""

	def report_start(self, url):
		"""Called when a download of *url* begins."""

	def report_finish(self, fname):
		"""Called when the downloaded file has been saved as *fname*."""

	def report_progress(self, percent_str, counter, total_len_str, speed_str, eta_str):
		"""Called periodically with pre-formatted progress figures."""

	def report_completion(self, total_len_str, speed_str, time_str):
		"""Called once when a download finishes successfully."""

	def report_error(self, str):
		"""Called with a human-readable error message."""

	def report_warning(self, str):
		"""Called with a human-readable warning message."""

class StatusReportConsole:
	"""Progress reporter that writes status to stdout/stderr.

	Implements the same interface as StatusReport; progress is drawn
	in-place using carriage returns so the console line updates during a
	download instead of scrolling.
	"""
	def report_start(self, url):
		# Announce which URL is about to be fetched.
		print "Getting: ", url

	def report_finish(self, fname):
		# Announce the local path the download was saved to.
		print "Saved: ", fname

	def report_progress(self, percent_str, counter, total_len_str, speed_str, eta_str):
		# '\r' rewinds to the start of the line so each update overwrites
		# the previous one; flush so it appears immediately.
		sys.stdout.write('\rRetrieving: %5s%% (%8s of %s) at %8s/s ETA %s ' % (percent_str, counter, total_len_str, speed_str, eta_str))
		sys.stdout.flush()

	def report_completion(self, total_len_str, speed_str, time_str):
		# Trailing spaces blank out the remains of the longer progress line.
		sys.stdout.write('\rGot %s at %8s/s during %s                                \n' % (total_len_str, speed_str, time_str))
		sys.stdout.flush()

	def report_error(self, str):
		# Errors go to stderr so they survive stdout redirection.
		sys.stderr.write("Error: %s\n" % str)

	def report_warning(self, str):
		print str

class Configuration:
	"""Keep track of our configuration received from user.

	A single instance (the module-level `config`) is shared by all
	download helpers and plugins.  Also owns the cookie jar and installs
	the global urllib2 opener (see init_comm).
	"""
	def __init__(self):
		self.rs_user = None             # rapidshare username
		self.rs_passwd = None           # rapidshare password
		self.cookies_loaded = False     # True once the jar was read from disk
		self.cookies = None             # cookielib.LWPCookieJar, set in init_comm
		self.cookie_file = 'cookies.lwp'
		self.retries = 3                # attempts per file before giving up
		self.report = None              # StatusReport-style progress reporter
		self.dl_dir = '.'               # download target directory
		self.cookie_dir = '.'           # directory holding the cookie file
		self.stop_getting = False       # set by callers to abort a download
		self.overwrite_existing = False # overwrite vs. unique-name behaviour
		self.tmo = 15.0                 # network timeout, seconds
		self.no_proxy = False
		self.explicit_proxy = None
		self._set_eol()

	def _set_eol(self):
		"""Pick the platform's native line ending for text files we write."""
		if sys.platform.startswith('win') or sys.platform in ('mingw', 'cygwin', 'dos', 'os2'):
			self.eol = '\r\n'
		elif sys.platform in ('mac', 'darwin'):
			self.eol = '\r'
		else:
			self.eol = '\n'

	def initialize(self, rs_user, rs_passwd, retries, report, overwrite, dl_dir, tmo):
		"""Apply the user-supplied settings in one call (used by main())."""
		self.rs_user = rs_user
		self.rs_passwd = rs_passwd
		self.report = report
		self.overwrite_existing = overwrite
		self.set_retries(retries)
		self.set_dl_dir(dl_dir)
		self.set_tmo(tmo)

	def set_retries(self, retries):
		"""Set the retry count; accepts strings from the command line."""
		self.retries = int(retries)

	def set_dl_dir(self, dl_dir):
		"""Set (and create if necessary) the download directory."""
		if not os.path.isdir(dl_dir):
			# makedirs also creates missing intermediate directories;
			# plain os.mkdir would fail on a nested path like 'a/b'.
			os.makedirs(dl_dir)
		self.dl_dir = os.path.abspath(dl_dir)

	def set_tmo(self, tmo):
		"""Set the network timeout; applies globally to new sockets."""
		self.tmo = float(tmo)
		socket.setdefaulttimeout(self.tmo)

	def init_comm(self, cookie_dir, no_proxy, explicit_proxy = None):
		"""Initialize all urllib2 stuff"""
		self._set_cookiedir(cookie_dir)
		self.no_proxy = no_proxy
		self.explicit_proxy = explicit_proxy

		# Proxy precedence: explicit proxy > forced direct > environment default.
		if explicit_proxy:
			opener = urllib2.build_opener(urllib2.ProxyHandler({'http':explicit_proxy}), urllib2.HTTPCookieProcessor(self.cookies))
		else:
			if no_proxy:
				opener = urllib2.build_opener(urllib2.ProxyHandler({}), urllib2.HTTPCookieProcessor(self.cookies))
			else:
				opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookies))

		# Masquerade as Internet Explorer 6.0 on XP so sites serve us normally.
		opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)')]

		# Now all calls to urllib2.urlopen will use our opener
		urllib2.install_opener(opener)

	def _set_cookiedir(self, cookie_dir):
		"""Point the cookie jar at *cookie_dir* and reload it if it was loaded."""
		self.cookies = cookielib.LWPCookieJar(os.path.join(cookie_dir, self.cookie_file))
		self.cookie_dir = cookie_dir
		if self.cookies_loaded:
			# The old jar's contents are gone; force a reload from the new path.
			self.cookies_loaded = False
			CookiesLoaded()

	# Pickle support: only plain settings are stored; the cookie jar and
	# urllib2 opener are rebuilt in __setstate__.
	def __getstate__(self):
		return (self.rs_user, self.rs_passwd, self.cookies_loaded, self.cookie_file, self.retries, self.dl_dir, self.cookie_dir, self.overwrite_existing, self.tmo, self.no_proxy, self.explicit_proxy)

	def __setstate__(self, state):
		self.rs_user, self.rs_passwd, self.cookies_loaded, self.cookie_file, self.retries, self.dl_dir, self.cookie_dir, self.overwrite_existing, self.tmo, self.no_proxy, self.explicit_proxy = state
		self._set_eol()
		self.set_tmo(self.tmo)
		self.set_dl_dir(self.dl_dir)
		self.init_comm(self.cookie_dir, self.no_proxy, self.explicit_proxy)

# Module-level singleton holding all user-supplied settings; shared by
# every download helper and exported to plugins.
config = Configuration()

def CookiesLoaded():
	"""Ensure the cookie jar is loaded from disk; return the load status."""
	if config.cookies_loaded:
		return True
	try:
		config.cookies.load()
		config.cookies_loaded = True
	except IOError:
		# A missing cookie file is normal on first run - just warn.
		config.report.report_warning("Warning: No cookie file present")
	return config.cookies_loaded

class CantResume(Exception):
	"""Raised when a download cannot be resumed (server refuses Range requests)."""
	pass

class NoAuth(Exception):
	"""Raised when authentication failed; treated as permanent (no retry) by get_with_retry."""
	pass

class CantDownload(Exception):
	"""Raised when an error occurred while downloading; treated as permanent (no retry)."""
	pass

def find_attr(data, attr):
	"""Find the value of query attribute *attr* in a URL or query string.

	*data* may be a full URL (its query part is extracted first) or a raw
	'var=val&var2=val2' query string.  Returns the value, or None when
	the attribute is not present.
	"""
	# Accept SSL URLs as well; anything else is treated as a raw query string.
	if data.startswith('http://') or data.startswith('https://'):
		data = urlparse.urlparse(data)[4]
	for arg in data.split('&'):
		var, val = urllib.splitvalue(arg)
		if var == attr:
			return val
	return None

def html2ascii(text):
	"""Decode the four basic HTML character entities in *text*."""
	# '&amp;' is decoded last so that nested encodings (e.g. '&amp;lt;')
	# resolve exactly as the original replacement order did.
	for entity, char in (('&quot;', '"'), ('&gt;', '>'), ('&lt;', '<'), ('&amp;', '&')):
		text = text.replace(entity, char)
	return text

class URLLister(SGMLParser):
	"""SGML parser that collects anchor, image and script URLs from a page."""

	def reset(self):
		SGMLParser.reset(self)
		self.urls = []         # href targets of <a> tags, in document order
		self.image_urls = []   # src targets of <img> tags
		self.script_urls = []  # src targets of <script> tags

	def start_a(self, attrs):
		for key, value in attrs:
			if key == 'href':
				self.urls.append(value)

	def start_img(self, attrs):
		for key, value in attrs:
			if key == 'src':
				self.image_urls.append(value)

	def start_script(self, attrs):
		for key, value in attrs:
			if key == 'src':
				self.script_urls.append(value)

def get_links(url, filter = ''):
	"""Get all anchor and image links on the page at *url*.

	*filter* is a regular expression searched against each link; the
	empty default matches everything.  HTML entities are decoded and
	duplicates removed; anchor links come before image links.
	"""
	parser = URLLister()
	parser.feed(urllib2.urlopen(url).read())
	result = []
	pattern = re.compile(filter)
	# Decode entities *before* the duplicate check.  The old code compared
	# the raw URL against already-decoded entries, so entity-encoded
	# duplicates slipped through.
	for link in parser.urls + parser.image_urls:
		link = html2ascii(link)
		if pattern.search(link) and link not in result:
			result.append(link)

	parser.close()
	return result

def open_unique_file(fname, index = 0):
	"""Open a file for binary writing in the download directory.

	Unless overwriting is enabled, a numeric suffix is appended to the
	name until one is found that does not exist yet.  Returns a tuple of
	(open file object, full path).
	"""
	# Iterative rather than recursive: no recursion-depth limit when many
	# files with the same name already exist.
	while True:
		if index:
			local_file = os.path.join(config.dl_dir, "%s_%d" % (fname, index))
		else:
			local_file = os.path.join(config.dl_dir, fname)

		if config.overwrite_existing or not os.path.exists(local_file):
			return open(local_file, 'wb'), local_file
		index += 1

def creat_unique_directory(directory, index = 0):
	"""Create a directory under the download directory and return its path.

	Unless overwriting is enabled, a numeric suffix is appended until a
	name is found that does not already exist.
	"""
	# Iterative rather than recursive: no recursion-depth limit when many
	# directories with the same name already exist.
	while True:
		if index:
			local_dir = os.path.join(config.dl_dir, "%s_%d" % (directory, index))
		else:
			local_dir = os.path.join(config.dl_dir, directory)

		if config.overwrite_existing:
			# Reuse an existing directory rather than inventing a new name.
			if not os.path.isdir(local_dir):
				os.mkdir(local_dir)
			return local_dir

		if not os.path.exists(local_dir):
			os.mkdir(local_dir)
			return local_dir
		index += 1

const_1k = 1024 # bytes per (binary) kilobyte
const_initial_block_size = 10 * const_1k # first read size, before rate-based tuning
const_epsilon = 0.0001 # smallest meaningful time delta; guards near-zero divisions

def new_block_size(before, after, bytes):
	"""Pick the next read size from the rate of the previous read.

	The new size is the measured bytes-per-second rate, clamped to the
	range [bytes / 2, bytes * 2], each bound being at least one byte.
	"""
	lower = max(bytes / 2.0, 1.0)
	upper = max(bytes * 2.0, 1.0)
	elapsed = after - before
	# Immeasurably fast read: grow as much as allowed.
	if elapsed < const_epsilon:
		return int(upper)
	rate = bytes / elapsed
	return int(min(max(rate, lower), upper))

def optimum_k_exp(num_bytes):
	"""Return the power of 1024 best suited to display *num_bytes*."""
	# const_1k is only read here, so no `global` declaration is needed.
	if num_bytes == 0:
		return 0
	return long(math.log(num_bytes, const_1k))

def format_bytes(num_bytes):
	"""Render *num_bytes* with the best-fitting binary unit suffix."""
	try:
		exp = optimum_k_exp(num_bytes)
		suffix = 'bkMGTPEZY'[exp]
	except IndexError:
		# More than 2**90 bytes cannot happen with real downloads.
		sys.exit('Error: internal error formatting number of bytes.')
	if exp == 0:
		return '%s%s' % (num_bytes, suffix)
	return '%.2f%s' % (num_bytes / float(const_1k ** exp), suffix)

def format_time(start, end):
	"""Format the interval between *start* and *end* as MM:SS.

	Intervals longer than 99 minutes are shown as '--:--' to keep the
	fixed-width display intact.
	"""
	elapsed = end - start
	minutes = elapsed / 60
	if minutes > 99:
		return '--:--'
	return '%02d:%02d' % (minutes, elapsed % 60)

def calc_eta(start, now, total, current):
	"""Estimate the remaining download time as MM:SS.

	Uses the average rate since *start*; returns '--:--' when no progress
	has been made yet or the estimate exceeds 99 minutes.
	"""
	elapsed = now - start
	if current == 0 or elapsed < const_epsilon:
		return '--:--'
	rate = float(current) / elapsed
	remaining = long((total - current) / rate)
	minutes = remaining / 60
	if minutes > 99:
		return '--:--'
	return '%02d:%02d' % (minutes, remaining % 60)

def calc_speed(start, now, bytes):
	"""Return the average transfer rate, formatted with a unit suffix."""
	elapsed = now - start
	# No data yet or an immeasurably small interval: nothing meaningful to show.
	if bytes == 0 or elapsed < const_epsilon:
		return 'N/A b'
	return format_bytes(bytes / float(elapsed))

def download_file(dst_file, response):
	"""Download data from an HTTP response stream and save it to *dst_file*.

	Reads the stream in adaptively-sized blocks, reporting progress via
	config.report.  Stops at end of stream, on a socket error, or when
	config.stop_getting is set.  Returns the number of bytes written.
	"""
	byte_counter = 0
	block_size = const_initial_block_size
	start_time = time.time()
	# 'after' was previously only assigned inside the loop; if the loop
	# broke before the first read (stop request or an immediate socket
	# error) the summary lines below raised NameError.
	after = start_time
	try:
		total_len = long(response.headers.dict["content-length"])
		total_len_str = format_bytes(total_len)
	except KeyError:
		# Server sent no Content-Length; percent/ETA cannot be computed.
		total_len = None
		total_len_str = 'N/A'

	while True:
		if total_len is not None:
			percent = float(byte_counter) / float(total_len) * 100.0
			percent_str = '%.1f' % percent
			eta_str = calc_eta(start_time, time.time(), total_len, byte_counter)
		else:
			percent_str = '---.-'
			eta_str = '--:--'

		counter = format_bytes(byte_counter)
		speed_str = calc_speed(start_time, time.time(), byte_counter)
		config.report.report_progress(percent_str, counter, total_len_str, speed_str, eta_str)

		if config.stop_getting:
			break
		before = time.time()
		try:
			data = response.read(block_size)
		except socket.error:
			# Connection dropped; caller decides whether to resume.
			break
		after = time.time()
		dl_bytes = len(data)
		if dl_bytes == 0:
			break
		byte_counter += dl_bytes
		dst_file.write(data)
		# Tune the next read so each one takes roughly one second.
		block_size = new_block_size(before, after, dl_bytes)

	speed_str = calc_speed(start_time, after, byte_counter)
	time_str = format_time(start_time, after)
	if not config.stop_getting:
		config.report.report_completion(total_len_str, speed_str, time_str)
	return byte_counter

def download_file_with_retries(fname, remote_url, is_post = False):
	"""Download a file and perform retries if necessary, also closes the connection.

	Saves the response body under a unique name derived from *fname* in
	the download directory.  When the content length is known, incomplete
	transfers are resumed with HTTP Range requests until all bytes have
	arrived.  Raises CantResume if the server answers a Range request
	with anything other than 206 Partial Content.
	"""
	if is_post:
		# Passing data (even an empty string) makes urllib2 issue a POST.
		response = urllib2.urlopen(remote_url, '')
	else:
		response = urllib2.urlopen(remote_url)
	
	dst_file, filename = open_unique_file(fname)
	try:
		if config.stop_getting:
			config.stop_getting = False
			return
		
		try:
			total_size = long(response.headers.dict["content-length"])
		except KeyError:
			# Without a known total size completeness cannot be verified,
			# so a single pass without resume support has to do.
			config.report.report_warning("Could not get content size")
			download_file(dst_file, response)
			return
		
		got = 0
		while got < total_size:
			if config.stop_getting:
				config.stop_getting = False
				return
			if got != 0:
				# Partial download: ask for the remaining bytes only.
				config.report.report_warning("Retrying %s from %d" % (filename, got))
				req = urllib2.Request(response.geturl())
				req.add_header("Range", "bytes=%d-" % got)
				response = urllib2.urlopen(req)
				if response.code != 206: # server does not support partial requests
					dst_file.close()
					os.unlink(filename)
					raise CantResume("Server does not support resumes")
			got += download_file(dst_file, response)
	finally:
		if not dst_file.closed:
			dst_file.close()

def get_img_url(url, filter):
	"""Return the first image URL on the page at *url* that contains *filter*.

	Returns an empty string when no image URL matches.
	"""
	result = ''
	parser = URLLister()
	parser.feed(urllib2.urlopen(url).read())
	for candidate in parser.image_urls:
		if filter in candidate:
			result = candidate
			break
	parser.close()
	return result

def get_filename_from_url(url):
	"""Derive a local file name from *url*.

	Uses the last '='-separated value of the query string when present,
	otherwise the last path element; any trailing '.html...' is cut off.
	"""
	parsed = urlparse.urlparse(url)
	query, path = parsed[4], parsed[2]
	if query:
		local_file = query.split('=')[-1]
	else:
		local_file = path.split('/')[-1]
	# Drop '.html' and everything after it.
	loc = local_file.find('.html')
	if loc != -1:
		local_file = local_file[:loc]
	return local_file

def cookie_present(site):
	"""Check whether the cookie jar holds a cookie whose domain contains *site*."""
	# Iterate the jar directly; the old enumerate() index was never used.
	for cookie in config.cookies:
		if site in cookie.domain:
			return True
	return False

def get_valid_name(name):
	"""Get a valid file/directory name from *name*.

	'&' becomes 'and'; characters that are illegal in Windows file names
	(and '/' on Unix) are removed.
	"""
	# One regex pass instead of ten chained str.replace calls.
	return re.sub(r'[:|/\\<>"?*]', '', name.replace('&', 'and'))

def unzip_file(fname):
	"""Unzip a single file into its directory and delete the original zip.

	Archive sub-directories are flattened: each member is written under
	its base name next to the zip.  A corrupt or unreadable archive is
	left untouched on disk.
	"""
	directory = os.path.split(fname)[0]
	if directory == '':
		directory = '.'

	arch_file = zipfile.ZipFile(fname, 'r')
	try:
		if arch_file.testzip(): # either the archive is corrupt, or we can't read it...
			return
		for name in arch_file.namelist():
			member = open(os.path.join(directory, os.path.split(name)[1]), 'wb')
			try:
				member.write(arch_file.read(name))
			finally:
				# Close each member explicitly; leaked handles also block
				# the unlink below on Windows.
				member.close()
	finally:
		arch_file.close()
	os.unlink(fname)

def unrar_file(fname):
	"""Unrar a single file into its directory and delete the original rar.

	Archive sub-directories are flattened: each member is written under
	its base name next to the rar.
	"""
	directory = os.path.split(fname)[0]
	if directory == '':
		directory = '.'

	arch_file = rarfile.RarFile(fname, 'r')
	try:
		for name in arch_file.namelist():
			member = open(os.path.join(directory, os.path.split(name)[1]), 'wb')
			try:
				member.write(arch_file.read(name))
			finally:
				# Close each member explicitly; leaked handles also block
				# the unlink below on Windows.
				member.close()
	finally:
		arch_file.close()
	os.unlink(fname)

def extract_file(fname):
	"""Extract a single archive, dispatching on its file extension.

	Extraction errors are deliberately swallowed: a bad archive is simply
	left on disk.
	"""
	# os.path.splitext keeps the leading dot ('.zip', not 'zip'); the old
	# comparisons against 'zip'/'rar' never matched, so nothing was ever
	# extracted.
	extension = os.path.splitext(fname)[1].lower()
	try:
		if extension == '.zip':
			unzip_file(fname)
		elif extension == '.rar':
			unrar_file(fname)
	except: pass

def extract_all(fname):
	"""Extract *fname*, then extract any archives found in the same directory."""
	extract_file(fname)
	directory = os.path.split(fname)[0]
	if directory == '':
		directory = '.'
	# Zip archives first, then rar, matching the original pass order.
	for pattern in ('*.zip', '*.rar'):
		for arch_name in glob.glob(os.path.join(directory, pattern)):
			extract_file(arch_name)

def get_files_from_link(url):
	"""Download every supported file linked from the page at *url*.

	The link list is written to downloading.txt while the transfers run
	and the file is removed after every link has been attempted.
	"""
	links = get_links(url, plugins.url_filter)
	stat_file = open("downloading.txt", "w")
	for link in links:
		stat_file.write(link + config.eol)
	stat_file.close()

	for link in links:
		get_with_retry(link)
	os.unlink("downloading.txt")

def get_from_source_link(url):
	"""Get files from URL containing the links.

	Downloads everything linked from *url* into a new directory named
	after the page, keeping a downloading.txt link list there while the
	transfers run.  config.dl_dir is redirected to the new directory for
	the duration and always restored afterwards.
	"""
	links = get_links(url, plugins.url_filter)
	# Directory name: last path element of the URL with any '.htm...'
	# extension (and everything after it) cut off.
	new_dir = urlparse.urlparse(url)[2].split(".htm")[0].split('/')[-1]
	
	created_dir = creat_unique_directory(new_dir)
	stat_file = file(os.path.join(created_dir, 'downloading.txt'), 'w')
	curr_dl_dir = config.dl_dir
	try:
		config.dl_dir = created_dir
		for l in links:
			stat_file.write(l + config.eol)
	
		for l in links:
			get_with_retry(l)
	finally:
		# Clean up the status file and restore the global download dir
		# even if a download raised.
		stat_file.close()
		os.unlink(os.path.join(created_dir, 'downloading.txt'))
		config.dl_dir = curr_dl_dir

# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# Used by get_with_retry to render urllib2.HTTPError codes for the user.
http_responses = {
	100: ('Continue', 'Request received, please continue'),
	101: ('Switching Protocols',
		'Switching to new protocol; obey Upgrade header'),

	200: ('OK', 'Request fulfilled, document follows'),
	201: ('Created', 'Document created, URL follows'),
	202: ('Accepted',
		'Request accepted, processing continues off-line'),
	203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
	204: ('No Content', 'Request fulfilled, nothing follows'),
	205: ('Reset Content', 'Clear input form for further input.'),
	206: ('Partial Content', 'Partial content follows.'),

	300: ('Multiple Choices',
		'Object has several resources -- see URI list'),
	301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
	302: ('Found', 'Object moved temporarily -- see URI list'),
	303: ('See Other', 'Object moved -- see Method and URL list'),
	304: ('Not Modified',
		'Document has not changed since given time'),
	305: ('Use Proxy',
		'You must use proxy specified in Location to access this resource.'),
	307: ('Temporary Redirect',
		'Object moved temporarily -- see URI list'),

	400: ('Bad Request',
		'Bad request syntax or unsupported method'),
	401: ('Unauthorized',
		'No permission -- see authorization schemes'),
	402: ('Payment Required',
		'No payment -- see charging schemes'),
	403: ('Forbidden',
		'Request forbidden -- authorization will not help'),
	404: ('Not Found', 'Nothing matches the given URI'),
	405: ('Method Not Allowed',
		'Specified method is invalid for this server.'),
	406: ('Not Acceptable', 'URI not available in preferred format.'),
	407: ('Proxy Authentication Required', 'You must authenticate with this proxy before proceeding.'),
	408: ('Request Timeout', 'Request timed out; try again later.'),
	409: ('Conflict', 'Request conflict.'),
	410: ('Gone',
		'URI no longer exists and has been permanently removed.'),
	411: ('Length Required', 'Client must specify Content-Length.'),
	412: ('Precondition Failed', 'Precondition in headers is false.'),
	413: ('Request Entity Too Large', 'Entity is too large.'),
	414: ('Request-URI Too Long', 'URI is too long.'),
	415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
	416: ('Requested Range Not Satisfiable',
		'Cannot satisfy request range.'),
	417: ('Expectation Failed',
		'Expect condition could not be satisfied.'),

	500: ('Internal Server Error', 'Server got itself in trouble'),
	501: ('Not Implemented', 'Server does not support this operation'),
	502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
	503: ('Service Unavailable',
		'The server cannot process the request due to a high load'),
	504: ('Gateway Timeout',
		'The gateway server did not receive a timely response'),
	505: ('HTTP Version Not Supported', 'Cannot fulfill request.')}

def get_with_retry(url, fname = None):
	"""Attempt to get a URL. Do retries if necessary.

	Dispatches *url* to the first plugin that claims it and retries
	transient failures (socket errors, failed resumes, URL errors) up to
	config.retries times.  Permanent failures (HTTP errors, missing
	authentication, CantDownload) abort immediately.  URLs that could not
	be fetched are appended to '(<fname>_)failed.txt'.
	"""
	getter_plugin = None
	for plugin in plugins.get():
		if plugin.url_matches(url):
			getter_plugin = plugin
			break
	
	# Identity check against None is the idiomatic (and safe) comparison.
	if getter_plugin is None:
		config.report.report_error("Unsupported URL provided: %s" % url)
		return
	
	config.report.report_start(url)
	curr = 0
	while curr < config.retries:
		try:
			getter_plugin.get(url)
			return
		except socket.error, msg:
			# Transient network problem: wait a moment and retry.
			config.report.report_error(msg)
			curr += 1
			time.sleep(1)
			continue
		except CantDownload, msg:
			config.report.report_error(msg)
			break
		except CantResume, msg:
			config.report.report_error(msg)
			curr += 1
			continue
		except urllib2.HTTPError, msg:
			# .get() guards against status codes missing from the table.
			config.report.report_error("%d: %s" % (msg.code, http_responses.get(msg.code, ('Unknown',))[0]))
			break
		except urllib2.URLError, msg:
			config.report.report_error(msg.reason)
			curr += 1
			time.sleep(1)
			continue
		except NoAuth, msg:
			config.report.report_error(msg)
			break
	
	# All attempts failed (or a permanent error broke the loop): record the URL.
	if fname is None:
		fname = 'failed.txt'
	else:
		fname = '%s_failed.txt' % fname
	
	fd = file(fname, 'a')
	try:
		fd.write(url)
		fd.write(config.eol)
	finally:
		fd.close()
	config.report.report_error("Unable to get: %s" % url)

# Program version; shown by --version and exported to plugins.
yapget_version = "1.2"

class Plugins:
	"""Class for managing plugins. Note that this class has to be defined _AFTER_ all classes/functions used by plugins"""
	def __init__(self):
		self._export_list = {}
		# List all the globals we need to export to a plugin
		_list = globals()
		_exclude_list = ['self', 'export_list', 'Configuration',
				'imp', 'zipfile', 'rarfile', 'OptionParser', 'SGMLParser',
				'const_1k', 'const_epsilon', 'const_initial_block_size', 'Plugins']
		for _item in _list:
			if not _item.startswith('_') and _item not in _exclude_list:
				self._export_list[_item] = _list[_item]

		self._plugins = []    # all registered plugin objects
		self.url_filter = ''  # '|'-joined regex fragments of supported URLs
		self.sites = ''       # human-readable list of supported sites
		# imp suffix descriptions for source/bytecode/optimized modules
		# (any flavour may be unavailable on a given build).
		self._py_desc = None
		self._pyc_desc = None
		self._pyo_desc = None

		for attrs in imp.get_suffixes():
			if attrs[0] == '.py':
				self._py_desc = attrs
			elif attrs[0] == '.pyc':
				self._pyc_desc = attrs
			elif attrs[0] == '.pyo':
				self._pyo_desc = attrs

	def get(self):
		"""Return the list of registered plugin objects."""
		return self._plugins

	def _prepare_plugin(self, m):
		"""Inject exported globals into module *m* and register its plugins."""
		for item in self._export_list:
			setattr(m, item, self._export_list[item])
		# 'modules' entries also advertise URL patterns; 'sources' entries
		# only list the sites they support.
		if 'modules' in dir(m):
			mods = m.modules
			add_url = True
		else:
			mods = m.sources
			add_url = False

		for plugin in mods:
			if plugin.version() != 1: # only plugin API version 1 is supported
				continue
			self._plugins.append(plugin)
			for name in plugin.supported_sites:
				self.sites += '\t' + name + '\n'
			if add_url:
				for name in plugin.supported_urls:
					self.url_filter += name + '|'

	def load_plugins(self, directory):
		"""Load all modules from directory"""
		imp.acquire_lock()
		try:
			for attrs in [self._py_desc, self._pyc_desc, self._pyo_desc]:
				if not attrs:
					continue
				for module in glob.glob(os.path.join(directory, '*' + attrs[0])):
					# Cut the suffix off the end only; str.replace would also
					# mangle a name that contains the suffix mid-string.
					name = os.path.split(module)[1][:-len(attrs[0])]
					if name in sys.modules: # already got it
						continue
					fp = file(module, attrs[1])
					try:
						m = imp.load_module(name, fp, module, attrs)
					finally:
						# Close the handle even when the import fails.
						fp.close()
					contents = dir(m)
					if 'modules' not in contents and 'sources' not in contents:
						del m
						continue
					self._prepare_plugin(m)
		finally:
			# Release the import lock even if a plugin raised during import.
			imp.release_lock()

		self.url_filter = self.url_filter.strip(' |')
		self.sites = self.sites.strip(' \n\t')

	def initialize(self):
		"""Load plugins from every default 'plugins' directory that exists."""
		_dirs = [
			os.path.split(sys.argv[0])[0], # where our executable is
			'/usr/local/yapget',
			'/usr/yapget',
			os.path.expanduser('~'),
		]
		for plugin_dir in _dirs:
			file_in_dir = os.path.join(plugin_dir, 'plugins')
			if os.path.isdir(file_in_dir):
				self.load_plugins(file_in_dir)

# Module-level plugin registry; populated via plugins.initialize() in main().
plugins = Plugins()

def main():
	"""Application main.

	Loads plugins, parses command-line options, configures the global
	Configuration singleton, then downloads from a source page (-s), a
	link file (-i) or a single URL (-u).  Returns the process exit code.
	"""
	plugins.initialize()
	usage="usage: %prog [options]\nDownload files from:\n\t" \
		+ plugins.sites \
		+ "\nUse proxy if http_proxy is set (must be 'http://username:password@host:port'), " \
		"\nor registry settings on Windows\n" \
		"Note that rapidshare authentication over proxy is not supported (but D/L is)\n" \
		"If some of the downloads fail, a text file with failed urls will be created"
	parser = OptionParser(usage=usage, version="%prog " + yapget_version)
	parser.add_option("-s", "--source-url", dest="source_url",
		help="get links to download from a URL", metavar="URL")
	parser.add_option("-i", "--input-file", dest="fname",
		help="get links to download from a file", metavar="FILE")
	parser.add_option("-u", "--url", dest="url_name",
		help="get a single url", metavar="URL")
	parser.add_option("-l", "--rs-login", dest="rs_user",
		help="rapidshare username for authentication " \
		+ "(only needed the first time you download from rapidshare)",
		metavar="USERNAME")
	parser.add_option("-p", "--rs-password", dest="rs_passwd",
		help="rapidshare password for authentication " \
		"(only needed the first time you download from rapidshare)",
		metavar="PASSWORD")
	parser.add_option("-n", "--no-proxy", action="store_true", dest="no_proxy",
		help="do not use proxy even if such information is present",
		default=False)
	parser.add_option("-t", "--timeout", dest="tmo",
		help="timeout value for network operations (default 15)",
		default=15, metavar="SECONDS")
	parser.add_option("-r", "--retries", dest="retries",
		help="how many times to retry before giving up on a file (default 3)",
		default=3)
	parser.add_option("-c", "--cookie-dir", dest="cookie_dir",
		help="where to get/save cookie file (default '.')",
		default='.', metavar="DIRECTORY")
	parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
		help="do not print anything to the console",
		default=False)
	parser.add_option("-d", "--dl-dir", dest="dl_dir",
		help="which directory to download files to (will be created if does not exist) (default '.')",
		default='.', metavar="DIRECTORY")
	parser.add_option("-f", "--force-overwrite", action="store_true", dest="overwrite",
		help="overwrite file if exists (default is to create a unique filename/directory)",
		default=False)
	parser.add_option("-P", "--explicit-proxy", dest="explicit_proxy",
		help="specify explicitely a proxy - must be in format of http_proxy",
		metavar='PROXY')
	(options, args) = parser.parse_args()

	# Resolve the input file before any chdir-like behaviour can confuse it.
	if options.fname:
		options.fname = os.path.abspath(options.fname)
	# Quiet mode uses the no-op reporter; otherwise report to the console.
	if options.quiet:
		reporter = StatusReport()
	else:
		reporter = StatusReportConsole()

	if options.explicit_proxy and not is_valid_proxy_url(options.explicit_proxy):
		reporter.report_error('Invalid explicit proxy specified')
		return 1

	config.initialize(options.rs_user, options.rs_passwd, options.retries, reporter, options.overwrite, options.dl_dir, options.tmo)
	
	config.init_comm(options.cookie_dir, options.no_proxy, options.explicit_proxy)
	try:
		if options.fname:
			# One URL per line; ignore anything that is not a plain http URL.
			for line in file(options.fname, 'r').readlines():
				line = line.strip(' \r\n\t')
				if urlparse.urlparse(line)[0] == 'http':
					get_with_retry(line, options.fname)
		elif options.url_name:
			get_with_retry(options.url_name)
		elif options.source_url:
			get_files_from_link(options.source_url)
		else:
			parser.print_help()
			return 1
	except ValueError, msg:
		# e.g. non-numeric --timeout/--retries values surface here.
		config.report.report_error("Invalid argument: %s" % msg)
		return 1
	return 0

if __name__ == "__main__":
	ret = 0
	try:
		ret = main()
	except KeyboardInterrupt:
		print "Quiting"
	sys.exit(ret)

