'''
intraSiteAnalyzer -- crawl inside a single web site and report on its link graph.

Created on Dec 4-5, 2009

@author: Roni
'''
import logging
import re
import urllib
from xml.sax import saxutils

import cliques.cliqueFinder
import cliques.utils
import graph
import orchid
import webgraphs.utils
import webgraphs.webgraphCrawler



class intraSiteAnalyzer(webgraphs.webgraphCrawler.webgraphCrawler):
    '''
    A general class for crawling inside a web site and reporting on the
    link graph it builds (graph export files plus a clique report).

    Subclasses should override filter_site() and extract_page_name()
    together; the default implementations are specialized for English
    Wikipedia article URLs.
    '''

    # Compiled once at class-creation time instead of on every call.
    # BUGFIX: the original patterns used "[_|\w]" -- inside a character
    # class '|' is a *literal* pipe, not alternation, and \w already
    # includes '_'.  The hostname dots are also escaped so strings such
    # as "http://enXwikipediaXorg/..." no longer match.
    _SITE_PATTERN = re.compile(r"^http://en\.wikipedia\.org/wiki/\w*$")
    _PAGE_NAME_PATTERN = re.compile(
        r"^http://en\.wikipedia\.org/wiki/(?P<itemname>\w*)$")

    def __init__(self, linksToFetchAndCond, siteQueueAndCond, db, \
                 out_file_name, file_format):
        '''
        Constructor.

        linksToFetchAndCond -- links-to-crawl queue plus its condition object
        siteQueueAndCond    -- fetched-sites queue plus its condition object
        db                  -- crawl persistence layer
        out_file_name       -- prefix for every report/graph output file
        file_format         -- export format passed to cliques.utils.export_graph
        '''
        # NOTE(review): this initializes orchid.NaiveAnalyzer rather than the
        # declared base class webgraphs.webgraphCrawler.webgraphCrawler --
        # presumably webgraphCrawler derives from NaiveAnalyzer; confirm.
        orchid.NaiveAnalyzer.__init__(self, linksToFetchAndCond, siteQueueAndCond, db)
        self.out_file_name = out_file_name
        self.file_format = file_format
        self.ignore_list = []  # URLs to skip even when they match the site pattern

    def filter_site(self, url):
        '''Return url if it is a crawlable site URL, else None.

        Subclasses should override this method and extract_page_name().
        A URL is accepted when it matches the Wikipedia article pattern
        and is not on the ignore list.
        '''
        result = self._SITE_PATTERN.match(url)
        if result is None:
            return None
        site = result.group()
        if site in self.ignore_list:
            return None
        return site

    def extract_page_name(self, url):
        '''Return the page (article) name embedded in url.

        Subclasses should override this method and filter_site().
        Assumes url already passed filter_site(); a non-matching URL
        raises AttributeError (match() returns None), as before.
        '''
        return self._PAGE_NAME_PATTERN.match(url).group("itemname")

    def report(self):
        '''Output the run results: graph export files plus a clique summary.

        Searches for k-cliques for growing k (3..9), stopping at the first
        size not found, and appends a summary line plus the members of each
        clique found to "<out_file_name>-cliques.txt".
        '''
        logging.info("---------------- START CRAWLING REPORT --------------")
        result = self.webgraph
        logging.info("Exporting graph with %d nodes and %d edges" %
                     (len(result.nodes()), len(result.edges())))
        self.write_webgraph_files(self.webgraph)

        max_clique = 2  # reported size when not even a 3-clique exists
        clique_finder = cliques.cliqueFinder.cliqueFinder()
        clique_finder.setup(self.webgraph)
        nodes = self.webgraph.nodes()
        set_of_cliques = []
        for k in range(3, 10):
            clique = clique_finder.find_clique(k, nodes)
            if clique is None:
                break
            max_clique = k
            print("%d-Clique!" % k)
            for node in clique:
                print(node)
            set_of_cliques.append(clique)

        cliques_file = open(self.out_file_name + "-cliques.txt", "a")
        try:
            # BUGFIX: the original formatted self.webgraph.size (an
            # uncalled attribute) with %d for the node count; use
            # len(nodes()) as the logging call above already does.
            cliques_file.write("MaxClique,%d,#Nodes,%d,#Edges,%d\n" %
                               (max_clique,
                                len(self.webgraph.nodes()),
                                len(self.webgraph.edges())))
            for clique in set_of_cliques:
                cliques_file.write("-- %d-Clique --\n" % len(clique))
                for node in clique:
                    cliques_file.write("    %s\n" % self.extract_page_name(node))
        finally:
            cliques_file.close()
        logging.info("---------------- END CRAWLING REPORT --------------")

    def create_page_graph(self, webgraph):
        '''Return a graph isomorphic to webgraph whose nodes carry page
        names (extract_page_name) instead of full hostnames.'''
        host_to_item = dict()
        page_graph = graph.graph()
        hostnames = webgraph.nodes()
        # First pass: translate every node and remember the mapping so the
        # edge pass below does not re-run the regex per endpoint.
        for node in hostnames:
            page_name = self.extract_page_name(node)
            host_to_item[node] = page_name
            page_graph.add_node(page_name)

        # Second pass: copy edges under the translated names.
        for node in hostnames:
            page_name = host_to_item[node]
            for neighbor in webgraph.neighbors(node):
                page_graph.add_edge(page_name, host_to_item[neighbor])

        return page_graph

    def write_webgraph_files(self, webgraph):
        '''Output three files:

        1. "<out_file_name>-graph.dot"       -- the page-name graph
        2. "<out_file_name>-index-graph.dot" -- the index graph
        3. "<out_file_name>-sites.txt"       -- index<TAB>page name<TAB>hostname
        '''
        (index_graph, index_to_site) = webgraphs.utils.create_index_graph(webgraph)
        page_graph = self.create_page_graph(webgraph)

        cliques.utils.export_graph(page_graph, self.out_file_name + "-graph.dot", self.file_format)
        cliques.utils.export_graph(index_graph, self.out_file_name + "-index-graph.dot", self.file_format)

        site_file = open(self.out_file_name + "-sites.txt", "w")
        try:
            for index in index_graph:
                hostname = index_to_site[index]
                site_file.write("%d\t%s\t%s\n" %
                                (index, self.extract_page_name(hostname), hostname))
        finally:
            site_file.close()