'''
Created on 2012-1-31
Utility classes used in the project: parsing Google result pages,
retrieving HTML, extracting hyperlinks, and (de)serializing the crawler.
@author: xiaokan
'''

from urllib2 import HTTPError, URLError
from xiaokan.Pojo import Tag
import HTMLParser
import formatter
import htmllib
import os
import urllib2
import cPickle 

class GoogleResultsParser(HTMLParser.HTMLParser):
    '''
    Reads the html of a Google results page, builds a tree of Tag objects
    from it, and extracts the result links from that tree.
    '''

    #id attribute of the container Google wraps the organic results in
    GOOGLE_SEARCH_RESULTS_ID = "ires"

    #Void (self-closing) tags that never get a matching end tag; they are
    #skipped entirely so they cannot unbalance the tag stack
    ignore_sets = ['br', 'img', 'hr', 'embed', 'input', 'meta']

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        #Stack of currently-open tags; the bottom element is a synthetic
        #root so the finished tree always has a single entry point
        self.tag_stack = []
        root_tag = Tag()
        root_tag.tag_name = "_root"
        root_tag.parent_tag = None
        self.tag_stack.append(root_tag)
        #Maps an id attribute value to the Tag carrying it, for O(1) lookup
        self.tagMap = {}

    def handle_starttag(self, tag, attrs):
        '''
        Encountered a start tag: create a Tag with its tag name and
        attributes, push it on the tag stack and link it with its parent tag.
        '''
        if tag in self.ignore_sets:
            return

        s = self.tag_stack
        cur_tag = Tag()
        par_tag = s[-1]

        cur_tag.tag_name = tag
        cur_tag.tag_attr = attrs
        cur_tag.parent_tag = par_tag
        #Index the tag by its id attribute (if any) for later lookup
        for name, value in attrs:
            if name == "id":
                self.tagMap[value] = cur_tag

        s.append(cur_tag)
        par_tag.sub_tags.append(cur_tag)

    def handle_endtag(self, tag):
        '''Encountered an end tag: pop the innermost open tag off the stack.'''
        self.cur_tag_name = tag
        if tag in self.ignore_sets:
            return
        #Never pop the synthetic root, even when malformed HTML contains
        #spurious end tags (bug fix: the root used to be poppable)
        if len(self.tag_stack) > 1:
            self.tag_stack.pop()

    def handle_data(self, data):
        '''Encountered text data: attach it to the innermost open tag.'''
        self.tag_stack[-1].tag_data = data

    def get_tag_tree(self):
        '''Return the synthetic root of the parsed tag tree.'''
        return self.tag_stack[0]

    def get_results(self, N):
        '''Return the web site addresses of the top N results as a list.

        Result items without an <h3><a href="http...">...</a></h3> structure
        (e.g. Google recommendation entries) are skipped.
        '''
        links_arr = []
        if self.GOOGLE_SEARCH_RESULTS_ID not in self.tagMap:
            return links_arr

        res_tags_container_parent = self.tagMap[self.GOOGLE_SEARCH_RESULTS_ID]
        res_tags_container = res_tags_container_parent.sub_tags[0]
        res_tags = res_tags_container.get_subtags_by_tag_name("li")
        cnt = 0
        for tag_item in res_tags:    #link stored in the <a> of an <h3> of every result item
            h3_tags = tag_item.get_subtags_by_tag_name('h3')
            if len(h3_tags) == 0:
                continue

            a_tags = h3_tags[0].get_subtags_by_tag_name('a')
            if len(a_tags) == 0:
                continue

            link = a_tags[0].get_attr_by_name('href')
            #Bug fix: check for a missing href BEFORE calling startswith on
            #it -- previously a None link raised AttributeError here
            if link is None:
                continue
            if not link.startswith("http"):     #a Google recommendation result, ignore it
                continue

            cnt += 1
            links_arr.append(link)
            if cnt >= N:    #enough
                break

        return links_arr

class GoogleHelper(object):
    '''
    Retrieves Google search result pages over HTTP.
    '''

    def __init__(self):
        self._google_url = "http://www.google.com/search?sourceid=chrome&ie=UTF-8&q="
        #Google rejects the default urllib2 user agent, so pretend to be a browser
        self._opener = urllib2.build_opener()
        self._opener.addheaders = [('User-agent', 'Mozilla/5.0')]

    def getHTMLResults(self, keywords, start_number_of_pages=0):
        '''Get the Google results html for the keywords; the start page number is optional (10 results per page).'''
        url = self._google_url + keywords + "&start=" + str(start_number_of_pages * 10)
        data = self._opener.open(url).read()
        return data

    def get_Body_Part(self, data):
        '''Return the <body>...</body> part of the html, or the whole input when no body markers are found.'''
        start = data.find("<body")
        end = data.find("</body>")
        if start == -1 or end == -1:
            #No body markers: fall back to the full document instead of
            #returning a bogus slice built from -1 indices
            return data
        #Bug fix: "</body>" is 7 characters, the old slice used end + 8 and
        #dragged one extra character after the closing tag into the result
        return data[start:end + len("</body>")]
    
#class GenericHelper(object):
#    
#    def __init__(self):
#        self._opener = urllib2.build_opener()        
#    
#    def get_html(self, url):
#        data = self._opener.open(url).read()
#        return data
#        
        
class HyperlinkParser(htmllib.HTMLParser):

    #Protocol prefixes
    PROTOCOL_PREFIXES = ["http://", "ftp://", "https://"]
    #Timeout setting for http retrieving
    MY_TIMEOUT = 10

    def __init__(self):
        self.formatter = formatter.NullFormatter()
        htmllib.HTMLParser.__init__(self, self.formatter)
        pass
    
    #Reset the link parser by calling father's reset method and reset the parameters
    def reset(self):
        htmllib.HTMLParser.reset(self)
        self.links = []
        self.data = ''
        self._opener = urllib2.build_opener()
        self.base_url = ''
        
    #Get the html of the url and feed the parser 
    def feed_url(self, url):        
        last_pos = url.rfind("/", 8)    #find the last / but ignore the parts containing it like http://
        
        if last_pos == -1:              #find the base url
            self.base_url = url + "/"
        else:
            self.base_url = url[0:last_pos + 1]      
                  
        try:
            resp = self._opener.open(url, timeout=self.MY_TIMEOUT)
            if resp.headers.type != "text/html":
                return False
            self.data = resp.read()
            print "Processing: ", url
            self.feed(self.data)
            
        except HTTPError, error_code:               #Handle Http errors and output it
            if error_code.code == 401:
                print "Authorization required"
            elif error_code.code == 404:
                print "File not found"
            elif error_code.code == 500:
                print "Http server error"
            else:
                print error_code
            return False
        
        except URLError, error_code:
            print error_code
            return False
        
        except Exception:
            print "Other Exception!"
            return False
        
        return True
            
    #When parser meet an <A> tag, call this method and get the link in it (relative address is automatically parsed)
    def start_a(self, attrs):   
        for attr_name, value in attrs:
            if attr_name == 'href' and value != None and value != '':
                link = self.addr_rela2abs(value)
                self.links.append(link)
    
    #Convert relative address to absolute address and return it, if it is an absolute address, directly return it
    def addr_rela2abs(self, link):
        for pro_pre in self.PROTOCOL_PREFIXES:
            if link.startswith(pro_pre):
                return link
        return self.base_url + link
    
    def get_links(self): 
        return self.links
    
    def get_data(self):
        return self.data
    
class SerializationUtils(object):
    '''
    Persists the crawler data structure to disk and restores it, using cPickle.
    '''

    #Where the pickled crawler is stored between runs (Windows path)
    CRAWLER_DATA_STRUCTURE_STORE_PATH = "d:/google_pages/crawler_ds.dat"

    def is_crawled_before(self):
        '''Return the previously saved crawler object, or None when no usable save file exists.'''
        try:
            if not os.path.isfile(self.CRAWLER_DATA_STRUCTURE_STORE_PATH):
                return None
            #Bug fix: pickles are binary data, so open in "rb" (text mode
            #mangles them on Windows, where this path lives); the with
            #statement also guarantees the handle is closed, which the old
            #code never did
            with open(self.CRAWLER_DATA_STRUCTURE_STORE_PATH, "rb") as fin:
                return cPickle.load(fin)
        except Exception:
            #A corrupt or unreadable save file just means "not crawled before"
            return None

    def save_crawler2disk(self, crawler):
        '''Save the crawler object to disk; does nothing for None.'''
        if crawler is None:
            return
        try:
            #Bug fix: "wb" instead of "w" (binary pickle data), and the
            #with statement closes the file so the data is actually flushed
            with open(self.CRAWLER_DATA_STRUCTURE_STORE_PATH, "wb") as fout:
                cPickle.dump(crawler, fout)
        except Exception:
            print("Saving error!")
    
