
# Optional OCR dependencies (pytesser + PIL).  The plugin is only
# registered at the bottom of the file when both import successfully,
# so a missing module must not abort loading this file.
try:
	import pytesser
	import Image
except ImportError:
	# Catch only ImportError: a bare except would also swallow
	# KeyboardInterrupt/SystemExit and hide real bugs in the modules.
	pass

from sgmllib import SGMLParser

class PostLister(SGMLParser):
	"""SGML parser that collects a form's hidden <input> fields (into
	self.vars, name -> value) and the form's action URL (into self.action)."""
	def reset(self):
		SGMLParser.reset(self)
		self.vars = {}      # hidden input name -> value
		self.action = None  # form action URL, None until a <form> is seen

	def start_input(self, attrs):
		"""Record hidden <input> elements.

		Attributes are matched by name instead of position, so inputs with
		extra attributes or a different attribute order are handled too
		(the old code required exactly [type, name, value])."""
		d = dict(attrs)
		if d.get('type') == 'hidden' and 'name' in d:
			self.vars[d['name']] = d.get('value', '')

	def start_form(self, attrs):
		"""Remember the form's action URL; the last non-empty one wins."""
		action = dict(attrs).get('action')
		if action:
			self.action = action

class MegaPlugin:
	"""Download plugin for megaupload.com and its mirror sites.

	Solves the site's image captcha with OCR (pytesser/PIL), decodes the
	javascript-obfuscated download link and fetches the file."""
	def __init__(self):
		# put all the links this plugins supports here - it will be used to filter supported URLs
		self.supported_sites = ['www.megaupload.com', 'www.megarotic.com', 'www.sexuploader.com']
		# the URLs we support
		self.supported_urls = ['www.megaupload.com/?d=', 'www.megarotic.com/?d=', 'www.sexuploader.com/?d=']
		self.cookies_to_block = ['.megaupload.com']

	def get(self, url):
		"""Get the file from the url.

		Retries the whole captcha/OCR cycle until the site accepts the
		answer, then waits out the site's countdown and downloads the file.
		Raises TemporarilyUnavailable when the link is down and
		CantDownload when the served content is not the expected file.
		Returns early (None) when config.stop_getting is set."""
		chunk = ''
		ocr_succeeded = False
		while not ocr_succeeded:
			# honour a user-requested stop between retries
			if config.stop_getting:
				config.stop_getting = False
				return
			data = urllib2.urlopen(url).read()

			for line in data.splitlines():
				if line.find('Unfortunately, the link you have clicked is not available') != -1 \
					or line.find('The file you are trying to access is temporarily unavailable') != -1:
					raise TemporarilyUnavailable('The link is not available at the moment')

			# locate the captcha image on the page
			parser = URLLister()
			parser.feed(data)
			capcha_url = None
			for img_url in parser.image_urls:
				if img_url.find('/capgen.php?') != -1:
					capcha_url = urlparse.urljoin(url, img_url)
					break
			parser.close()
			if not capcha_url:
				config.report.report_error('Could not get image URL')
				continue

			# collect the hidden form fields that must be posted back
			parser = PostLister()
			parser.feed(data)
			post_data = parser.vars

			# OCR the captcha; the site expects a lower-case answer with no spaces
			fname = self._save_capcha_to_file(capcha_url)
			im = Image.open(fname)
			post_data['imagestring'] = pytesser.image_to_string(im).replace(' ', '').strip(' \n').lower()
			if self._ocr_failed(post_data['imagestring']):
				config.report.report_warning('OCR failed, retrying')
				continue

			data = urllib2.urlopen(parser.action, urllib.urlencode(post_data)).read()
			parser.close()
			# Grab the 4 lines after the 'Click here to download' marker;
			# they hold the javascript that builds the real download link.
			chunk = ''
			get_lines = 4
			start_counting = False
			for line in data.splitlines():
				if line.find('Please enter') != -1: # OCR was wrong
					config.report.report_warning('OCR failed, retrying')
					break
				elif line.find('Click here to download') != -1 and line.find('downloadlink') == -1:
					start_counting = True
					continue

				if start_counting and get_lines:
					chunk += line + '\n'
					get_lines -= 1

				if get_lines == 0:
					break
			if get_lines == 0:
				ocr_succeeded = True
			else:
				continue
			chunk = chunk.strip(' \n')

		# Translate the javascript obfuscation into a python expression and
		# evaluate it to recover the real download URL.
		var1 = chunk.splitlines()[0].strip(' ;')
		var2 = chunk.splitlines()[1].strip(' ;')
		var1_name = ' ' + var1.split(' = ')[0].split()[1] + ' '
		var2_name = ' ' + var2.split(' = ')[0].split()[1] + ' '
		expr1 = ' ' + var1.split(' = ')[1].replace('String.fromCharCode', 'unichr(int').replace('abs', 'fabs').replace('Math', 'math') + ').encode("utf-8") '
		expr2 = ' ' + var2.split(' = ')[1].replace('String.fromCharCode', 'unichr(int').replace('Math', 'math') + ').encode("utf-8") '
		val = chunk.splitlines()[2].split('href')[1].split('onclick')[0].strip(' =').replace('"', '\'')
		expr = val.replace(var1_name, expr1).replace(var2_name, expr2)
		# SECURITY: eval() of an expression derived from remote page content.
		# Kept for compatibility; a dedicated expression parser would be safer.
		url = eval(expr)
		config.report.report_warning('waiting for site to generate D/L link')
		# the site enforces a ~45 second countdown before the link is live
		for x in xrange(46):
			if config.stop_getting:
				config.stop_getting = False
				return
			time.sleep(1)
		config.report.report_warning('starting to D/L link')
		local_file = url.split('/')[-1]
		try:
			content = download_file_with_retries(local_file, url)
		except urllib2.HTTPError as e:
			if e.code == 503:
				config.report.report_warning('going to sleep for 15 minutes, since limit exceeded')
				for x in xrange(15 * 60):
					if config.stop_getting:
						config.stop_getting = False
						return
					time.sleep(1)
			raise  # bare raise preserves the original traceback ('raise e' loses it)

		if content == "application/octet-stream":
			config.report.report_finish(local_file)
		else:
			raise CantDownload("File %s is not what was expected" % local_file)

	def url_matches(self, url):
		"""Check whether we support this URL"""
		# see if the url contains a substring we are looking for
		return any(url.find(l) != -1 for l in self.supported_urls)

	def version(self):
		"""Return plugin version - when interface changes, the version changes"""
		return 1

	def _save_capcha_to_file(self, url):
		"""Download the captcha image from *url* into the download directory
		and return the local file name."""
		fname = os.path.join(config.dl_dir, 'capcha.gif')

		# remove any stale captcha from a previous attempt; missing file is fine
		try:
			os.unlink(fname)
		except OSError:
			pass

		# try/finally guarantees the handle is closed even if the download
		# fails (the old 'del f' relied on refcounting to flush and close)
		f = open(fname, 'wb')
		try:
			f.write(urllib2.urlopen(url).read())
		finally:
			f.close()
		return fname

	def _ocr_failed(self, text):
		"""Sanity-check the OCR result: the captcha is always exactly three
		alphanumeric characters, so anything else means the OCR failed."""
		return len(text) != 3 or not text.isalnum()

# Register the plugin only when the optional OCR dependencies (pytesser
# and PIL's Image) were imported successfully at the top of the file.
modules = [MegaPlugin()] if ('pytesser' in dir() and 'Image' in dir()) else []

