'''
Created on Oct 27, 2009

@author: sternron
'''
import orchid
import graph
import logging
import cliques.utils
import urllib
import cliques.cliqueFinder
import webgraphs.utils
from xml.sax import saxutils
import re

class webgraphCrawler(orchid.NaiveAnalyzer):
    '''
    Orchid analyzer that incrementally builds a web graph while crawling:
    nodes are host (or domain) names, edges are hyperlinks between them.
    At the end of the run it exports the graph files and reports the
    largest clique found in the graph.
    '''

    # Pre-compiled pattern used by filter_site() to pull the domain part
    # out of an absolute http:// URL (hoisted so it is compiled only once).
    DOMAIN_PATTERN = re.compile(r".*(http://(?P<domain>[^/]+)).*")

    def __init__(self, linksToFetchAndCond, siteQueueAndCond, db, \
                 output_filename, file_format='xml', \
                 clique_sizes_file='cliques.txt', \
                 domain_names_only=True):
        '''
        Constructor.

        output_filename   -- base name handed to webgraphs.utils when the
                             graph files are written out by report()
        file_format       -- requested export format (currently unused by
                             the active export path; kept for compatibility)
        clique_sizes_file -- CSV file to which report() appends a summary
                             line (max clique size, node count, edge count)
        domain_names_only -- when True, filter_site() collapses absolute
                             http:// URLs to their bare domain name
        '''
        orchid.NaiveAnalyzer.__init__(self, linksToFetchAndCond, siteQueueAndCond, db)
        self.output_filename = output_filename
        self.file_format = file_format
        self.clique_sizes_file = clique_sizes_file
        self.domain_names_only = domain_names_only

    def run(self):
        """
        Performs the main function of the analyzer: seeds the web graph
        with the initial set of links, then delegates to the base-class
        crawl loop (which repeats until the stop condition is set).
        """
        # Initialize the web graph
        # TODO: MAKE THIS PART THREAD SAFE. CURRENTLY THEORETICALLY AN EDGE CAN BE ADDED BEFORE
        # ADDING THE INITIAL NODES.
        self.webgraph = graph.graph()
        links_sets = self.linksToFetch[0]
        for dom in links_sets:
            for link in links_sets[dom]:
                hostname = self.filter_site(link)
                if hostname is not None:
                    logging.info("Added node %s" % hostname)
                    self.webgraph.add_node(hostname)
                else:
                    # BUG FIX: hostname is always None on this branch, so
                    # log the original link that was filtered instead.
                    logging.info("Filtered node %s" % link)

        # repeat while the stop condition hasn't been set
        orchid.NaiveAnalyzer.run(self)

    def report(self):
        ''' Output the run results: export the graph files and append a
        summary line (max clique size, node count, edge count) to the
        clique sizes file. '''
        logging.info("---------------- START CRAWLING REPORT --------------")
        result = self.webgraph
        num_nodes = len(result.nodes())
        num_edges = len(result.edges())
        logging.info("Exporting graph with %d nodes and %d edges" % (num_nodes, num_edges))
        webgraphs.utils.write_webgraph_files(self.output_filename, self.webgraph)

        # Look for progressively larger cliques; stop at the first size
        # for which none exists.
        max_clique = 2
        clique_finder = cliques.cliqueFinder.cliqueFinder()
        clique_finder.setup(self.webgraph)
        nodes = self.webgraph.nodes()
        for k in xrange(3, 10):
            clique = clique_finder.find_clique(k, nodes)
            if clique is None:
                break
            max_clique = k
            # Report through logging, consistent with the rest of the
            # class, instead of bare prints.
            logging.info("%d-Clique!" % k)
            for node in clique:
                logging.info(node)

        # BUG FIX: the original wrote self.webgraph.size (a bound method,
        # not an int) into a %d slot, which raises TypeError; use the node
        # count, matching the figure logged above.
        cliques_file = open(self.clique_sizes_file, "a")
        try:
            cliques_file.write("MaxClique,%d,#Nodes,%d,#Edges,%d\n" % (max_clique, \
                             num_nodes, num_edges))
        finally:
            # Always release the file handle, even if the write fails.
            cliques_file.close()
        logging.info("---------------- END CRAWLING REPORT --------------")

    def analyzeSite(self, db, site):
        """
        Processes a fetched site: marks it as crawled, adds each of its
        (filtered) regular outgoing links as nodes/edges of the web graph,
        and collects the links that still need to be crawled for
        addSiteToFetchQueue().
        """
        # Mark the site itself as crawled.
        db['crawled'][site.stringUrl] = True

        site_url = self.filter_site(site.stringUrl)

        # decide which links to crawl (in this case all regular links)
        self.__newLinksToCrawl = []
        for link in site.links['regular']:
            hostname = self.filter_site(link)
            if hostname is None:
                # Link has been filtered out entirely.
                logging.info("Link %s has been filtered" % link)
                continue

            if hostname not in db['crawled']:  # was has_key (removed in Py3)
                db['crawled'][hostname] = True
                self.__newLinksToCrawl.append(hostname)
                self.webgraph.add_node(hostname)
                logging.info("Added node %s" % hostname)

            # The edge is added even for already-crawled targets, so every
            # observed hyperlink is recorded.
            self.webgraph.add_edge(site_url, hostname)
            logging.info("Added edge (%s,%s)" % (site_url, hostname))

    def filter_site(self, str):
        ''' Filter the host name of a site. Returns the filtered hostname,
        or None when the whole site should be filtered out.

        NOTE(review): the parameter name shadows the builtin str; kept
        as-is for interface compatibility with existing callers. '''
        if not self.domain_names_only:
            return str
        if str.find("http://") == -1:  # If hostname already stripped
            return str
        # Extract the domain name from an absolute URL.
        result = self.DOMAIN_PATTERN.match(str)
        if result is None:
            # BUG FIX: previously a non-matching URL (e.g. a bare
            # "http://") crashed with AttributeError on result.group();
            # per the documented contract, return None to filter it out.
            return None
        logging.debug("extracting domain from " + str)
        return result.group('domain')

    def addSiteToFetchQueue(self, domain_to_links):
        """
        Adds the newly discovered links (collected by analyzeSite) to the
        fetch queue, grouped by domain.
        Possible refactoring can move the filtering to here,
        but currently requires too much refactoring inside Orchid.
        """
        logging.debug("Adding to lfs")
        domMap = self.reorganizeByDomain(self.__newLinksToCrawl)
        for dom in domMap:
            if dom in domain_to_links:  # was has_key (removed in Py3)
                domain_to_links[dom] += domMap[dom]
            else:
                domain_to_links[dom] = domMap[dom]
        
        