import httplib2
import urllib
import httplib, mimetypes
import time
import socket
import re, htmlentitydefs


#from infra.logging import logger

# HTTP status codes recognized by HttpReader: 200/302 are accepted as
# success (httplib2 follows redirects itself), 404 maps to
# HttpDocumentNotFound.
HTTP_RESP_OK = 200
HTTP_RESP_FOUND = 302
HTTP_RESP_NOT_FOUND_ERR = 404 

# TODO: move to oreus.conf
# Retry policy for transient socket errors in HttpReader.read():
# at most MAX_NUM_RESEND attempts, sleeping RESEND_TIME seconds between them.
MAX_NUM_RESEND = 5
RESEND_TIME = 5



class HttpDocumentNotFound(RuntimeError):
	"""Raised when the server answers a request with HTTP 404 Not Found."""

class HttpReadError(RuntimeError):
	"""Raised on any non-404 HTTP failure or exhausted retry in HttpReader."""


class HttpReader:
	"""Thin wrapper around httplib2.Http.

	Fetches a URL (GET, or POST when a body is supplied), retrying
	transient socket errors up to MAX_NUM_RESEND times, and optionally
	decodes the response body to unicode with HTML entity unescaping.
	"""

	def __init__(self):
		self.http_ = httplib2.Http()
		# Browser-like default request headers; extend via addHeader().
		self.headers_ = {
			'Connection': 'Keep-Alive',
			'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11',
			'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
			'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
		}
		self.contentLength_ = -1
		self.url_ = None
		# Fix: initialize so getCookie() cannot raise AttributeError when no
		# response has ever carried a Set-Cookie header.
		self.cookie_ = None

	def addHeader(self, name, value):
		"""Set (or overwrite) a request header used on subsequent reads."""
		self.headers_[name] = value

	def removeHeader(self, name):
		"""Remove a request header; an unknown name is silently ignored."""
		self.headers_.pop(name, None)

	def getUrl(self):
		"""Return the URL passed to the most recent read(), or None."""
		return self.url_

	def read(self, url, body=None, encode=False):
		"""Fetch *url* and return the response body.

		body   -- when given, the request is a POST; a dict is
		          form-encoded, any other value is sent verbatim.
		encode -- when True, the body is decoded to unicode (utf-8 with
		          cp1250 fallback) and HTML character references removed.

		Raises HttpDocumentNotFound on HTTP 404, HttpReadError on any
		other failure (including exhausted retries). Socket errors are
		retried up to MAX_NUM_RESEND times, RESEND_TIME seconds apart.
		"""
		self.url_ = url

		content = None
		err = None  # defined even if the loop body never runs

		for i in range(MAX_NUM_RESEND):
			resp = None
			err = None
			try:
				if body:
					if isinstance(body, dict):
						resp, content = self.http_.request(url, 'POST', headers=self.headers_, body=urllib.urlencode(body))
					else:
						resp, content = self.http_.request(url, 'POST', headers=self.headers_, body=body)
				else:
					resp, content = self.http_.request(url, "GET", headers=self.headers_)

				if resp.status not in (HTTP_RESP_OK, HTTP_RESP_FOUND):
					self.contentLength_ = -1
					err = resp.reason
					break

				if 'content-length' in resp:
					self.contentLength_ = long(resp['content-length'])
				else:
					# Header missing: fall back to the actual body size.
					self.contentLength_ = long(len(content))

				if 'set-cookie' in resp:
					self.cookie_ = resp['set-cookie']

				break

			except socket.error as e:
				# Transient network failure: remember it, wait, retry.
				err = e
				time.sleep(RESEND_TIME)

			except KeyboardInterrupt as e:
				self.contentLength_ = -1
				raise HttpReadError(e)

			except Exception as e:
				self.contentLength_ = -1
				raise HttpReadError(e)

		if err:
			if resp and resp.status == HTTP_RESP_NOT_FOUND_ERR:
				raise HttpDocumentNotFound()
			raise HttpReadError(err)

		if encode:
			content = self._decode(content)

		return descape(content)

	def _decode(self, rawContent):
		"""Best-effort conversion of *rawContent* to a unicode string.

		Tries, in order: unescape() + utf-8; plain utf-8 on the raw
		bytes; cp1250; finally utf-8 after blanking the first offending
		byte range with 'XXX'. Raises UnicodeDecodeError if all fail.
		"""
		content = rawContent
		try:
			content = unescape(content)
			if not isinstance(content, unicode):
				content = unicode(content, 'utf-8')
			return content
		except UnicodeDecodeError:
			pass

		try:
			return unicode(rawContent, 'utf-8')
		except UnicodeDecodeError as e:
			# Remember where the raw bytes first failed to decode.
			start, end = e.start, e.end

		try:
			return unicode(content, 'cp1250')
		except UnicodeDecodeError:
			pass

		# Last resort: blank the offending byte range and retry utf-8.
		content = content.replace(content[start:end], 'XXX')
		return unicode(content, 'utf-8')

	def getContentLength(self):
		"""Content length of the last successful read(), or -1."""
		return self.contentLength_

	def getCookie(self):
		"""Value of the last Set-Cookie header seen, or None."""
		return self.cookie_

	def makeUrlDic(self, url):
		"""Split *url* into {'url': base, 'data': {name: value} or None}.

		The query string (text after the first '?') is parsed into a
		name/value dict. Fixes over the original: values containing '='
		are no longer truncated, a bare name without '=' maps to ''
		instead of raising IndexError, and a second '?' in the query no
		longer discards the data.
		"""
		urlDic = {}
		urlToks = url.split("?", 1)
		urlDic["url"] = urlToks[0]
		if len(urlToks) == 2:
			nvDic = {}
			for nvPair in urlToks[1].split("&"):
				nvToks = nvPair.split("=", 1)
				nvDic[nvToks[0]] = nvToks[1] if len(nvToks) == 2 else ""
			urlDic["data"] = nvDic
		else:
			urlDic["data"] = None

		return urlDic
	

##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
	def fixup(m):
		text = m.group(0)
		if text[:2] == "&#":
			# character reference
			try:
				if text[:3] == "&#x":
					return unichr(int(text[3:-1], 16))
				else:
					return unichr(int(text[2:-1]))
			except ValueError:
				pass
		else:
			# named entity
			try:
				text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
			except KeyError:
				pass
		return text # leave as is
	return re.sub("&#?\w+;", fixup, text)	


# Precompiled pattern matching a named HTML entity reference such as "&amp;";
# group 1 captures the bare entity name (non-greedy: shortest match up to ';').
hePattern = re.compile("&(\w+?);")

def descape_entity(m, defs=htmlentitydefs.entitydefs):
	# callback: translate one entity to its ISO Latin value
	try:
		return defs[m.group(1)]
	except KeyError:
		return m.group(0) # use as is


def descape(string):
	"""Replace every named HTML entity in *string* (e.g. "&amp;") with its
	ISO Latin value via descape_entity; unknown entities stay as-is."""
	return hePattern.sub(descape_entity, string)




