import urllib
import simplejson
from WnParser import WnParser
import HTMLParser2
from urllib import FancyURLopener
from urlparse import urlparse

class MyOpener(FancyURLopener):
	# URL opener that spoofs a (dated) Firefox User-Agent string so that
	# sites which block the default "Python-urllib/x.y" agent still serve
	# their pages to us.
	version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'



class WnGoogle:
	"""Fetch Google Custom Search results for a keyword, download every hit
	(plus a text-extracted "mobilized" version and its PageRank) and persist
	the parsed data through an injected DB adapter.

	Typical usage:
		g = WnGoogle(cx, key)
		g.setDbAdapter(adapter)      # adapter provides removeByKeyword,
		                             # insertSearchResult, replaceSearchResults
		g.setKeyword("some query")
		g.getResults()
	"""

	# Google Custom Search JSON API endpoint (query string appended below).
	_url 		= "https://www.googleapis.com/customsearch/v1?"
	# Read It Later text-extraction ("mobilizer") service, API key baked in.
	_mobilizer  = "http://text.readitlaterlist.com/v2/text?apikey=145gcW64pdtO7VUq56Thn7bKR0A7lebz&"
	_keyword 	= None
	_cx 		= None   # custom search engine id
	_key 		= None   # Google API key
	_db 		= None   # DB adapter, injected via setDbAdapter()

	def __init__(self, cx, key):
		"""cx: custom search engine id; key: Google API key."""
		self._cx = cx
		self._key = key
		# BUGFIX: _results used to be a class-level list, so every instance
		# appended into the same shared list. Make it per-instance.
		self._results = []

	def setKeyword(self, keyword):
		"""Set the search keyword used by subsequent getResults() calls."""
		self._keyword = keyword

	def setDbAdapter(self, adapter):
		"""Inject the storage adapter (must provide removeByKeyword,
		insertSearchResult and replaceSearchResults)."""
		self._db = adapter

	def getMobilizedHTML(self, link):
		"""Return the text-extracted ("mobilized") HTML of *link*.

		Best effort: returns '' on any failure instead of raising."""
		try:
			return urllib.urlopen(self._mobilizer + urllib.urlencode({'url': link})).read()
		except Exception:
			print "===ERROR: failed to get mobilized version ==="
			return ""

	def getResults(self, processPages=4):
		"""Fetch *processPages* pages (10 hits each) of search results for the
		current keyword, download and parse every hit, and persist each one
		via the DB adapter. Previously stored rows for the keyword are
		removed first. Collected results are also kept in self._results."""
		counter = 0
		self._db.removeByKeyword(self._keyword)

		# Hoisted out of the page loop: one opener serves all downloads.
		myopener = MyOpener()

		for page in range(0, processPages):
			url = self._url + urllib.urlencode({
				"q" : self._keyword,
				"key" : self._key,
				"cx" : self._cx,
				"start" : (page * 10) + 1,  # CSE 'start' is 1-based
			})
			print "===Getting search results (page %d)===" % (page + 1)

			results = simplejson.loads(urllib.urlopen(url).read())

			# BUGFIX: a response without 'items' (quota exhausted, or fewer
			# result pages than requested) used to raise KeyError and abort
			# the whole run; treat it as an empty page instead.
			for i in results.get('items', []):
				counter += 1
				print "===Downloading '%s'===" % i['link']
				try:
					html = myopener.open(i['link'])
					html = html.read().decode('utf-8', 'ignore')

					# Parse the raw page; on failure blank the html so the
					# stored row is consistent with the empty parse.
					try:
						p = WnParser()
						p.cleanup()
						p.feed(html)
					except Exception as e:
						html = ''
						print "PARSE FAILED!!! Error was: '%s'" % e

					# Parse the mobilized (text-only) version; best effort.
					try:
						m = WnParser()
						m.cleanup()
						m.feed(self.getMobilizedHTML(i['link']))
					except Exception as e:
						print "Getting Mobilized page failed!!! Error was: '%s'" % e

					# PageRank lookup for the hit's host; -1 sentinel on failure.
					try:
						url_parsed = urlparse(i['link'])
						pr = myopener.open("http://pr.webinfodb.net/pr.php?key=ZjhiY2NlNDdlYTJkZDhkNjk3YjBkZDcy&" + urllib.urlencode({'url': url_parsed.netloc})).read()
					# BUGFIX: this was a bare 'except:' whose message printed
					# 'e' from a *previous* handler — a NameError whenever no
					# earlier handler had fired. Bind the exception here.
					except Exception as e:
						pr = -1
						print "Getting PageRank failed!!! Error was: '%s'" % e

					result = {
						'position' : counter,
						'title' : i['title'].encode('utf8'),
						'description' : i['snippet'].encode('utf8', 'strip'),
						'html' : html.encode('utf8', 'strip'),
						'html_stripped' : p.stripped_text.encode('utf8', 'strip'),
						'pr': pr,
						'mobilized' : m.stripped_text.encode('utf8', 'strip'),
						'url' : i['link'].encode('utf8', 'strip'),
						'h1' : p.getH1().encode('utf8', 'strip'),
						'h2_1' : p.getH2_1().encode('utf8', 'strip'),
						'h2_2' : p.getH2_2().encode('utf8', 'strip'),
						'links' : p.getLinks().encode('utf8', 'strip'),
					}

					self._db.insertSearchResult(self._keyword, result)
					self._results.append(result)
				except Exception as e:
					# Per-hit failures are logged and skipped so one bad page
					# does not abort the remaining downloads.
					print "===DOWNLOAD FAILED!!!!==="
					print e

	def parseResults(self):
		# TODO: post-processing of the collected results is not implemented yet.
		print "parse"

	def saveResults(self):
		"""Replace all stored results for the current keyword in one batch."""
		print "saving"
		self._db.replaceSearchResults(self._keyword, self._results)