'''
Created on Jan 7, 2010

@author: Roni
'''
import graph
import logging
import sys
import cliques.randomCliqueSearch
import orchid
import re
from cliques.randomCliqueSearch import randomCliqueSearch
from cliques.knownDegreeSearch import knownDegreeSearch
from cliques.cliqueStarSearch import cliqueStarSearch
from cliques.cliqueSearchLowerbound import CliqueSearchLowerbound
from cliques.mdp.mdpBasedSearch import mdpBasedSearch
from cliques.mdp.limitedSampling import LimitedSampling
import timerManager
import os
import urllib2
import time
import random
#import urllib2.HTTPError


class OnlineCliqueSearch(object):
    ''' A class that implements an online search using the unknown graph clique algorithms.

    The search crawls google-scholar "cited by" pages, building up a
    Currently Known Graph (ckg) of citation links, and drives a pluggable
    clique-search algorithm over that graph. '''

    def __init__(self, clique_algorithm):
        ''' Initialize the searcher around the supplied clique-search algorithm object. '''
        orchid.Orchid.ignore_robots = True  # Tells orchid code to ignore robots.txt in web sites
        self.algorithm = clique_algorithm
        self.urlHandler = orchid.UrlHandler()
        self.extractor = orchid.OrchidExtractor()
        # Raw strings avoid the invalid-escape warning for "\?"; pattern value is unchanged.
        # Only links to google-scholar citation pages count as graph edges.
        self.filter_pattern = re.compile(r"^http://scholar.google.com/scholar\?cites=.*$")
        self.title_pattern = re.compile(r'document.title="(?P<itemname>.*)"')
        self.timer_manager = timerManager.timer_manager
        self.additional_roots = []
        self.root_hostname = None
        self.node_to_index = dict()  # hostname -> node index
        self.nodes = []              # node index -> hostname
        self.node_to_title = dict()  # hostname -> human-readable title
        self.ckg = graph.graph()     # the Currently Known Graph

    def import_config(self, config_file_name):
        ''' Import a config dictionary from a "key=value" properties file.

        Returns a dict mapping stripped keys to stripped values.  Blank lines
        are skipped; a value may itself contain '=' characters. '''
        config = dict()
        with open(config_file_name, 'r') as config_file:
            for line in config_file:
                if not line.strip():
                    continue  # tolerate blank lines instead of crashing on them
                key, _, value = line.partition("=")
                config[key.strip()] = value.strip()
        return config

    def search(self, exploration_limit):
        ''' Search for the desired k-clique, starting from the algorithm's current
        node, until the clique is found or the exploration limit is reached. '''
        while not self.algorithm.halt() and self.algorithm.iteration <= exploration_limit:
            # Each phase of an iteration is timed separately for later stats.
            self.timer_manager.start('choose_node_runtime')
            current_node_index = self.algorithm.choose_node()
            self.algorithm.current_node = current_node_index
            self.timer_manager.stop('choose_node_runtime')

            self.timer_manager.start('exploration_runtime')
            self.explore(current_node_index)
            self.timer_manager.stop('exploration_runtime')

            self.timer_manager.start('expand_runtime')
            self.algorithm.expand(current_node_index)
            self.timer_manager.stop('expand_runtime')

            self.algorithm.iteration = self.algorithm.iteration + 1

    def insert_new_node(self, node):
        ''' Insert a new node (hostname/URL) into the graph.

        Stores a mapping of node to index and vice versa, and returns the
        newly assigned index (indices are assigned in insertion order). '''
        node_index = len(self.nodes)
        self.node_to_index[node] = node_index
        self.nodes.append(node)
        self.ckg.add_node(node_index)
        return node_index

    def run(self, root_hostname, k, exploration_limit, additional_roots=None):
        ''' Run the online search.

        root_hostname     - the URL/hostname the crawl starts from
        k                 - size of the clique to search for
        exploration_limit - maximum number of algorithm iterations
        additional_roots  - optional extra root URLs seeded into the graph
                            (default None means "no extra roots") '''
        self.timer_manager.start('total_runtime')
        # Re-initialize all per-run state so run() can be called repeatedly.
        self.root_hostname = root_hostname
        self.node_to_index = dict()
        self.nodes = []
        self.node_to_title = dict()
        self.ckg = graph.graph()

        current_node_index = self.insert_new_node(root_hostname)
        self.explore(current_node_index)
        self.algorithm.setup(self.ckg, k, current_node_index)

        # Using None as the default avoids the shared-mutable-default-argument
        # pitfall; normalizing to [] also keeps process_host's membership test safe.
        additional_roots = [] if additional_roots is None else additional_roots
        self.additional_roots = additional_roots
        for root in additional_roots:
            logging.info("Adding extra root %s" % root)
            node_index = self.insert_new_node(root)
            self.algorithm.generate(None, node_index)

        self.search(exploration_limit)

        logging.info("Algorithm halted! [%s], Expanded %d nodes" % (self.algorithm.done, len(self.algorithm.expanded)))
        self.timer_manager.stop('total_runtime')

    def explore(self, node_index):
        ''' Explore a host: download it from the URL, parse it and update the ckg. '''
        hostname = self.nodes[node_index]
        links = self.process_host(hostname)
        self.update(self.ckg, node_index, links)
        logging.info("Explored Node %d:%s" % (node_index, self.node_to_title[hostname]))

    def offline_explore(self, node_index, root_ckg):
        ''' Explore a host using the offline available root CKG.

        Assumes that the explored node is one of the root nodes,
        i.e. its title is equal to its hostname.
        Also, assumes that the node neighbors have already been added. '''
        hostname = self.nodes[node_index]
        # Update node_to_title dictionary (root nodes use the hostname as title).
        self.node_to_title[hostname] = hostname

        # Update edges in CKG.  Nodes in the serialized graph are keyed by
        # stringified indices, hence the str()/int() round-trip.
        for neighbor in root_ckg.neighbors(str(node_index)):
            self.ckg.add_edge(node_index, int(neighbor))

        logging.debug("Offline exploration of Node %d:%s" % (node_index, self.node_to_title[hostname]))

    def process_host(self, hostname):
        ''' Download the web page, parse it and return a list of relevant links. '''
        # Spaces must be encoded as '+' for the query URL.
        self.urlHandler.processUrl(hostname.replace(" ", "+"))
        logging.info("Processes URL!")
        content = self.urlHandler.getSite()
        self.extractor.setSite(hostname, content)
        self.extractor.extract()

        # Extract title (mainly for esthetical reasons).  Root pages are not
        # citation pages, so their "title" is simply the hostname itself.
        if (self.root_hostname == hostname) or (hostname in self.additional_roots):
            self.node_to_title[hostname] = hostname
        else:
            self.node_to_title[hostname] = self.extract_title(self.extractor.getRawContent())

        # Keep only the links that point at google-scholar citation pages.
        links = self.extractor.getLinks()
        relevant_links = [link for link in links['regular']
                          if self.filter_pattern.match(link) is not None]
        return relevant_links

    def update(self, ckg, node_index, links):
        '''
        Update the CKG with the new links of node_index (just explored).
        ckg - The currently known graph
        node_index - The index of the node that was just explored
        links - The URLs of the links that were just discovered in the exploration
        '''
        for link in links:
            # If the link is new - add it (dict.has_key was removed in Python 3;
            # the "in" operator is equivalent and works everywhere).
            if link in self.node_to_index:
                link_index = self.node_to_index[link]
            else:
                link_index = self.insert_new_node(link)
            ckg.add_edge(node_index, link_index)

    def extract_title(self, site_content):
        ''' Extract the title of the paper from a google scholar citation page.

        Raises ValueError with a clear message when no title is present
        (instead of an opaque AttributeError on a None match). '''
        match = self.title_pattern.search(site_content)
        if match is None:
            raise ValueError("Could not extract a document title from site content")
        return match.group('itemname')

    def stats(self):
        ''' Return a summary record of the search outcome. '''
        record = dict()
        record['clique'] = self.algorithm.output_clique()
        record['iterations'] = self.algorithm.iteration
        return record

    def output_clique_members_titles(self):
        ''' After search has completed, output the titles of the clique members.

        Members whose title was never fetched are explored on demand. '''
        the_clique = self.algorithm.output_clique()
        clique_members = []
        for node in the_clique:
            host = self.nodes[node]
            if host not in self.node_to_title:
                self.explore(node)  # fetch the page so its title becomes known
            clique_members.append(self.node_to_title[host])
        return clique_members

    def explort_ckg(self, graph_file_name, host_file_name=None):
        ''' Print the currently known graph to a file, possibly printing also
        a matching of index to real host name.

        (Misspelled name kept for backward compatibility with existing callers;
        prefer the export_ckg alias below.) '''
        # NOTE(review): cliques.utils is referenced but only cliques.randomCliqueSearch
        # is imported at the top of the file -- verify the package __init__ exposes utils.
        cliques.utils.export_graph(self.ckg, graph_file_name, 'dot')
        if host_file_name is not None:
            with open(host_file_name, 'w') as host_file:
                for index, node in enumerate(self.nodes):
                    host_file.write("%d\t%s\n" % (index, node))

    # Correctly spelled alias; existing callers of explort_ckg keep working.
    export_ckg = explort_ckg

    def export_state(self, config_file_name):
        ''' Export the entire state of the search to config_file_name and sibling files. '''
        graph_file_name = "%s.dot" % config_file_name
        host_file_name = "%s.nodes.txt" % config_file_name
        expanded_list_file_name = "%s.expanded.txt" % config_file_name

        self.explort_ckg(graph_file_name, host_file_name)

        # file() was removed in Python 3; open() is the portable spelling.
        with open(expanded_list_file_name, 'w') as expanded_out_file:
            for node in self.algorithm.expanded:
                expanded_out_file.write("%d\t%s\n" % (node, self.node_to_title[self.nodes[node]]))

        with open(config_file_name, 'w') as config_file:
            config_file.write("k=%d\n" % self.algorithm.k)
            config_file.write("graph_file=%s\n" % graph_file_name)
            config_file.write("nodes_file=%s\n" % host_file_name)
            config_file.write("expanded_file=%s\n" % expanded_list_file_name)

    def import_state(self, config_file_name):
        ''' Import the entire state of the search from files, and update algorithm state accordingly. '''
        logging.info("Loading config")
        config = self.import_config(config_file_name)
        self.config = config

        logging.info("Loading root pages from file...")
        logging.info("Loading ckg nodes from file...")
        with open(config['nodes_file'], 'r') as root_nodes_file:
            for expected_index, line in enumerate(root_nodes_file):
                parts = line.strip().split('\t')
                node_index = int(parts[0])
                host_name = parts[1]
                # insert_new_node assigns indices in insertion order, so the
                # file must list nodes in consecutive-index order.
                if expected_index != node_index:
                    raise ValueError("Root nodes are not ordered - indexing problem")
                self.insert_new_node(host_name)

        logging.info("Loading ckg structure from file...")
        root_ckg = cliques.utils.import_graph(config['graph_file'], 'dot')

        # Offline explore all nodes that have been previously explored.
        logging.info("Loading list of explored nodes from file...")
        root_explored = []
        with open(config['expanded_file'], 'r') as root_explored_file:
            for line in root_explored_file:
                explored_node = int(line.split('\t')[0])
                root_explored.append(explored_node)
                self.offline_explore(explored_node, root_ckg)

        if not root_explored:
            raise ValueError("Expanded-nodes file is empty - cannot pick an initial node")

        # Choose the first as the initial node (not important, could be any explored node).
        logging.info("Offline exploring the previously expanded nodes...")
        current_node_index = root_explored[0]
        self.algorithm.setup(self.ckg, int(config['k']), current_node_index)

        for node in root_explored[1:]:
            self.algorithm.generate(None, node)
        for node in root_explored:
            logging.debug("Expanding %d ..." % node)
            self.algorithm.expand(node)
     
if __name__ == '__main__':
    # BUG FIX: main() was never defined anywhere in this module, so running the
    # file as a script died with a NameError.  Exit with a clear message instead;
    # this module is meant to be driven by instantiating OnlineCliqueSearch.
    sys.exit("onlineCliqueSearch defines no main(); "
             "instantiate OnlineCliqueSearch from a driver script instead.")