#!C:\Python26
import sys
import types

import urlparse
import urllib
import urllib2
import os.path

import time
import cookielib

import traceback

import logging

import random
import zlib

import re


#Unicode URL encoding code stolen from http://stackoverflow.com/a/804380/268006
def fixurl(url):
	"""Return *url* with every component safely percent-encoded.

	Accepts a byte string (assumed UTF-8) or a unicode string. The host is
	IDNA-encoded; user, password, path segments, query and fragment are
	re-quoted individually. (Python 2 only: relies on unicode/urlparse/urllib.)
	"""
	# Coerce byte strings to unicode before any splitting happens.
	if not isinstance(url, unicode):
		url = url.decode('utf8')

	pieces = urlparse.urlsplit(url)

	# Break the network location into credentials / host / port parts.
	creds, at_sign, host_and_port = pieces.netloc.rpartition('@')
	username, cred_sep, password = creds.partition(':')
	hostname, port_sep, portnum = host_and_port.partition(':')

	# Encode each netloc component on its own, then glue them back together.
	netloc_parts = (
		urllib.quote(username.encode('utf8')),
		cred_sep.encode('utf8'),
		urllib.quote(password.encode('utf8')),
		at_sign.encode('utf8'),
		hostname.encode('idna'),
		port_sep.encode('utf8'),
		portnum.encode('utf8'),
	)

	# Path segments may contain percent-encoded slashes, so quote each
	# segment separately instead of the path as a whole.
	segments = [
		urllib.quote(urllib.unquote(segment).encode('utf8'), '')
		for segment in pieces.path.split('/')
	]

	return urlparse.urlunsplit((
		pieces.scheme.encode('utf8'),
		''.join(netloc_parts),
		'/'.join(segments),
		urllib.quote(urllib.unquote(pieces.query).encode('utf8'), '=&?/'),
		urllib.quote(urllib.unquote(pieces.fragment).encode('utf8')),
	))

# A urllib2 wrapper that provides error handling and logging, as well as cookie management. It's a bit crude, but it works.

import gzip
import StringIO


class WebGetRobust:
	"""Robust urllib2 wrapper with cookie persistence, spoofed browser
	headers, automatic retries, transparent gzip/deflate decompression,
	and best-effort charset decoding for text/html responses.

	Python 2 only (urllib2 / cookielib / StringIO).
	"""
	COOKIEFILE = 'cookies.lwp'				# the path and filename to save your cookies in
	cj = None						# cookielib.LWPCookieJar, set in __init__
	cookielib = None					# NOTE(review): appears unused (the module is imported at file scope) — confirm
	opener = None						# urllib2 opener with the cookie processor attached

	log = logging.getLogger("Main.Web")
	#This is going on a public SVN server. I don't want bots to see my e-mail,
	#so it's b64 encoded. Not really secure, but it stops simple webscrapers.
	'''

	import base64
	values = [		('User-Agent'		,	'xADownloader Tool - (http://code.google.com/p/xadownloader/, %s)' % base64.b64decode('eGFkb3dubG9hZGVyQGdtYWlsLmNvbQ==')),
				('language'		,	'English')
		]
	'''
	# Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers
	# It should keep people from blocking this scraper *too* easily
	opera = [		('User-Agent'		,	'Mozilla/5.0 (Windows NT 6.1; en; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 11.61'),
				('Accept-Language'	,	'en-US,en;q=0.9'),
				('Accept'		,	'text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1'),
				('Accept-Encoding'	,	'gzip, deflate')
		]
	firefox = [		('User-Agent'		,	'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:8.0.1) Gecko/20100101 Firefox/8.0.1'),
				('Accept-Language'	,	'en-us,en;q=0.5'),
				('Accept'		,	'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
				('Accept-Encoding'	,	'gzip, deflate'),
				('Accept-Charset'	,	'ISO-8859-1,utf-8;q=0.7,*;q=0.7')
		]
	chrome = [		('User-Agent'		,	'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2'),
				('Accept-Language'	,	'en-US,en;q=0.8'),
				('Accept'		,	'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
				('Accept-Encoding'	,	'gzip,deflate,sdch'),
				('Accept-Charset'	,	'ISO-8859-1,utf-8;q=0.7,*;q=0.3')
		]
	IE = [			('User-Agent'		,	'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)'),
				('Accept-Language'	,	'en-US'),
				('Accept'		,	'text/html, application/xhtml+xml, */*'),
				('Accept-Encoding'	,	'gzip, deflate')
		]

	# One of these header sets is picked at random per instance (see __init__).
	browsers = [opera, chrome, firefox, IE]

	data = None						# NOTE(review): only ever assigned in __init__, appears unused — confirm

	def __init__(self, test=False):
		"""Pick a random browser header set, then build a cookie-aware opener.

		test -- if truthy, getpage() never touches the network and simply
		returns this value (a string of page content, for testing).
		"""

		self.browserHeaders = random.choice(self.browsers)

		self.testMode = test					# if we don't want to actually contact the remote server, you pass a string containing
									# pagecontent for testing purposes as test. It will get returned for any calls of getpage()

		# NOTE(review): self.data is never read again in this file — confirm before removing.
		self.data = urllib.urlencode(self.browserHeaders)

		self.cj = cookielib.LWPCookieJar()
												# This is a subclass of FileCookieJar
												# that has useful load and save methods
		if self.cj is not None:
			# Reuse cookies from a previous run when the file exists.
			if os.path.isfile(self.COOKIEFILE):
				self.cj.load(self.COOKIEFILE)
				self.log.info("Loading CookieJar")

			# NOTE(review): this tests the imported module object, so it is
			# always true — probably meant to test self.cj or similar.
			if cookielib is not None:
				self.log.info("Installing CookieJar")
				self.log.debug(self.cj)

				self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
				#self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')]
				self.opener.addheaders = self.browserHeaders
				#urllib2.install_opener(self.opener)

		# Dump the loaded cookies at debug level.
		for cookie in self.cj:
			self.log.debug(cookie)
			#print cookie


	def chunkReport(self, bytesSoFar, chunkSize, totalSize):
		"""Log download progress; includes a percentage when totalSize is known."""
		if totalSize:
			percent = float(bytesSoFar) / totalSize
			percent = round(percent * 100, 2)
			self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent))
		else:
			self.log.info("Downloaded %d bytes" % (bytesSoFar))


	def chunkRead(self, response, chunkSize=2 ** 18, reportHook=None):
		"""Read *response* in chunkSize pieces and return the whole body.

		reportHook, if given, is called as reportHook(bytesSoFar, chunkSize,
		totalSize) after every chunk; totalSize is None when the server sent
		no Content-Length header.
		"""
		contentLengthHeader = response.info().getheader('Content-Length')
		if contentLengthHeader:
			totalSize = contentLengthHeader.strip()
			totalSize = int(totalSize)
		else:
			totalSize = None
		bytesSoFar = 0
		content = ""
		while 1:
			chunk = response.read(chunkSize)
			content += chunk
			bytesSoFar += len(chunk)

			# An empty chunk means EOF.
			if not chunk:
				break

			if reportHook:
				reportHook(bytesSoFar, chunkSize, totalSize)

		return content


	def getpage(self, pgreq, addlHeaders = None, returnMultiple = False, callBack=None):
		"""Fetch a URL, retrying up to three times on failure.

		pgreq          -- URL string to fetch
		addlHeaders    -- optional dict of extra request headers
		returnMultiple -- if True, return (content, handle) instead of content
		callBack       -- optional progress hook forwarded to chunkRead()

		Gzip/deflate bodies are decompressed; text/html bodies are decoded to
		unicode using the charset from the headers, a <meta> charset found by
		regex, or iso-8859-1 as a last resort. Returns the page content (the
		string "Failed" when all attempts fail), or self.testMode when test
		mode is active. Raises ValueError for an invalid URL/headers.
		"""

		# pgreq = fixurl(pgreq)
		# print pgreq
		# print type(pgreq)

		originalString = pgreq
		try:
			pgreq = pgreq.encode("utf-8")
		except:
			self.log.error("Encoding error - requested URL has unicode characters in it?")
			self.log.error("url: %s" % pgreq)

		log = self.log

		pgctnt = "Failed"
		pghandle = None

		loopctr = 0

		try:

			if addlHeaders != None:
				pgreq = urllib2.Request(pgreq, headers=addlHeaders)
			else:
				pgreq = urllib2.Request(pgreq)
		except:
			print "Invalid header or url"
			raise ValueError("Invalid headers or url")

		errored = False
		lastErr = ""

		delay = 1.5
		if not self.testMode:
			while 1:

				loopctr = loopctr + 1



				# Give up entirely after three failed attempts.
				if loopctr > 3:
					log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted" % (pgreq.get_full_url(), time.ctime(time.time())))
					pgctnt = "Failed"
					try:
						print("Critical Failure to retrieve page! %s at %s, attempt %s" % (pgreq.get_full_url(), time.ctime(time.time()), loopctr))
						print "Error:", lastErr
						print "Exiting"
					except:
						print "And the URL could not be printed due to an encoding error"
					break

				#print "execution", loopctr
				try:
					pghandle = self.opener.open(pgreq)					# Get Webpage

				except urllib2.HTTPError, e:								# Lotta logging
					log.warning("Error opening page: %s at %s On Attempt %s." % (pgreq.get_full_url(), time.ctime(time.time()), loopctr))
					log.warning("%s%s" % ("Error Code: ", e))

					#traceback.print_exc()
					lastErr = e
					try:
						log.warning("Error opening page: %s at %s On Attempt %s." % (pgreq.get_full_url(), time.ctime(time.time()), loopctr))
						log.warning("Error: %s, Original URL: %s" % (e, originalString))
						errored = True
					except:
						log.warning("And the URL could not be printed due to an encoding error")

					# A 404 will not get better on retry, so stop immediately.
					if e.code == 404:
						#print "Unrecoverable - Page not found. Breaking"
						log.critical("Unrecoverable - Page not found. Breaking")
						break

					time.sleep(delay)


				except Exception:
					errored = True
					#traceback.print_exc()
					lastErr = sys.exc_info()
					log.warning("Retreival failed. Traceback:")
					log.warning(lastErr)

					log.warning("Error Retrieving Page! - Trying again - Waiting 2.5 seconds")

					try:
						print "Error on page - %s" % originalString
					except:
						print "And the URL could not be printed due to an encoding error"

					time.sleep(delay)


					continue

				if pghandle != None:
					try:

						log.info("Got Page: %s at %s On Attempt %s." % (pgreq.get_full_url(), time.ctime(time.time()), loopctr))
						if callBack:
							pgctnt = self.chunkRead(pghandle, 2 ** 17, reportHook = callBack)
						else:
							pgctnt = pghandle.read()
						if pgctnt != None:



							encoded = pghandle.headers.get('Content-Encoding')
							#preLen = len(pgctnt)
							if encoded == 'deflate':

								# Raw DEFLATE stream: negative wbits disables the zlib header check.
								pgctnt = zlib.decompress(pgctnt, -zlib.MAX_WBITS)

							elif encoded == 'gzip':

								buf = StringIO.StringIO(pgctnt)
								f = gzip.GzipFile(fileobj=buf)
								pgctnt = f.read()

							elif encoded == "sdch":
								raise ValueError("Wait, someone other then google actually supports SDCH compression?")


							cType = pghandle.headers.get("Content-Type")
							self.log.info("File type: %s" % cType)
							if "text/html" in cType:				# If this is a html/text page, we want to decode it using the local encoding

								#print docType, charset, docType == unicode("text/html")
								#print
								#print "\"%s\"" % docType
								#print

								if (";" in cType) and ("=" in cType): 		# the server is reporting an encoding. Now we use it to decode the

									docType, charset = cType.split(";")
									charset = charset.split("=")[-1]


								else:		# The server is not reporting an encoding in the headers.

									# this *should* probably be done using a parser.
									# However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parses just to pull out a tag that
									# should be right near the page beginning anyways.
									# As such, it's a regular expression for the moment
									coding = re.search("charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pgctnt, flags=re.IGNORECASE)

									cType = ""
									if coding:
										cType = coding.group(1)

									# NOTE(review): cType here is only the regex's charset group, which
									# cannot contain ';' or '=', so this branch looks unreachable — confirm.
									if (";" in cType) and ("=" in cType): 		# the server is reporting an encoding. Now we use it to decode the

										docType, charset = cType.split(";")
										charset = charset.split("=")[-1]

									else:
										charset = "iso-8859-1"

								try:
									pgctnt = unicode(pgctnt, charset)
									#print type(pgctnt)
								except UnicodeDecodeError:
									# Fall back to lossy utf-8 decoding rather than failing outright.
									self.log.error("Encoding Error! Stripping invalid chars.")
									pgctnt = pgctnt.decode('utf-8', errors='ignore')
							elif "text" in cType:
								self.log.critical("Unknown content type!")
								self.log.critical(cType)

								print("Unknown content type!")
								print(cType)


							break


					except:
						print "pghandle = ", pghandle

						traceback.print_exc()
						log.error(sys.exc_info())
						log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying" % delay)

						try:
							print("Critical Failure to retrieve page! %s at %s" % (pgreq.get_full_url(), time.ctime(time.time())))
							print "Exiting"
						except:
							print "And the URL could not be printed due to an encoding error"
						print
						log.error(pghandle)
						time.sleep(delay)

						pgctnt = "Failed"




		if errored and pghandle != None:
			print "Later attempt succeeded %s" % pgreq.get_full_url()
			#print len(pgctnt)


		if returnMultiple:
			if self.testMode:
				raise ValueError("testing mode does not support multiple return values yet!")
			return pgctnt, pghandle
		else:
			if self.testMode:
				return self.testMode
			else:
				return pgctnt



	def __del__(self):
		"""Persist cookies to COOKIEFILE when the object is torn down."""

		if self.cj is not None:							# If cookies were used
			self.cj.save(self.COOKIEFILE)					# save the cookies again


def isList(obj):
	"""isList(obj) -> Returns true if obj is a Python list.

	This function does not return true if the object supplied
	is a UserList object.
	"""
	# 'type(obj) is list' is the idiomatic exact-type check; the old
	# 'type(obj) == types.ListType' used a deprecated alias (removed in
	# Python 3) and compared types with '=='. Behavior is identical:
	# subclasses of list (and UserList) are still rejected.
	return type(obj) is list


def isTuple(obj):
	"isTuple(obj) -> Returns true if obj is a Python tuple."
	# Exact-type check via 'is'; avoids the deprecated types.TupleType
	# alias (removed in Python 3) while preserving the original behavior
	# of rejecting tuple subclasses.
	return type(obj) is tuple


class DummyLog:									# For testing WebGetRobust (mostly)
	"""File-like stub that accumulates everything written to it.

	Each write() appends a newline plus the string; repr() yields the
	accumulated text; close() is a no-op.
	"""
	# Accumulated text; starts empty at class level, shadowed per instance
	# on the first write().
	logText = ""

	def __repr__(self):
		return self.logText

	def write(self, string):
		# Prepend a newline to every entry, exactly like the original
		# "%s\n%s" formatting did.
		self.logText = self.logText + "\n" + string

	def close(self):
		pass


if __name__ == "__main__":
	# Ad-hoc smoke test: fetch two pages and print the Content-Encoding
	# each server replied with (exercises the gzip/deflate handling).
	print "Oh HAI"
	wg = WebGetRobust()

	content, handle = wg.getpage("http://www.lighttpd.net", returnMultiple = True)
	print handle.headers.get('Content-Encoding')
	content, handle = wg.getpage("http://www.example.org", returnMultiple = True)
	print handle.headers.get('Content-Encoding')