'''
Online clique search over Google Scholar citation pages.

Created on Jan 7, 2010 (revised Feb 14, 2010)

@author: Roni
'''
import graph
import logging
import sys
import cliques.randomCliqueSearch
import orchid
import re
from cliques.randomCliqueSearch import randomCliqueSearch
from cliques.knownDegreeSearch import knownDegreeSearch
from cliques.cliqueStarSearch import cliqueStarSearch
from cliques.cliqueSearchLowerbound import CliqueSearchLowerbound
from cliques.mdp.mdpBasedSearch import mdpBasedSearch
from cliques.mdp.limitedSampling import LimitedSampling
import timerManager
import os
import urllib2
import time
import random
#import urllib2.HTTPError

RESOURCE_RESULTS_DIR = "../resources/results"
RESOURCE_SEED_PAPERS = "../resources/nanotoxic/papersOver3.txt"

class NanoToxicMain(object):
    ''' Implements an online clique search over the "currently known graph" (CKG)
    of Google Scholar citation pages, driven by an unknown-graph clique algorithm.

    Nodes are page URLs; an edge is added from an explored page to every
    "cites=" link found on it. The algorithm object decides which node to
    explore next and when to halt.
    '''
    def __init__(self, clique_algorithm):
        '''
        clique_algorithm - object exposing setup / halt / choose_node / expand /
                           output_clique and the attributes iteration, done,
                           current_node (see the cliques.* implementations).
        '''
        orchid.Orchid.ignore_robots = True  # Tells orchid code to ignore robots.txt in web sites
        self.algorithm = clique_algorithm
        self.urlHandler = orchid.UrlHandler()
        self.extractor = orchid.OrchidExtractor()
        # Only "cited by" scholar links count as graph edges
        self.filter_pattern = re.compile("^http://scholar.google.com/scholar\?cites=.*$")
        # Scholar citation pages set the paper title via an inline JS assignment
        self.title_pattern = re.compile('document.title="(?P<itemname>.*)"')
        self.timer_manager = timerManager.timer_manager

    def insert_new_node(self, node):
        ''' Insert a new node (URL) into the graph, maintaining the
        node<->index mappings. Returns the index assigned to the node. '''
        node_index = len(self.nodes)
        self.node_to_index[node] = node_index
        self.nodes.append(node)
        self.ckg.add_node(node_index)
        return node_index

    def run(self, root_hostname, k, exploration_limit):
        ''' Run a k-clique search starting from root_hostname, exploring at
        most exploration_limit nodes. Timing is accumulated in the shared
        timer manager under total/choose_node/exploration/expand timers
        (callers must create those timers first). '''
        self.timer_manager.start('total_runtime')
        # (Re-)initialize all per-run state so the instance can be reused
        self.root_hostname = root_hostname
        self.node_to_index = dict()
        self.nodes = []
        self.node_to_title = dict()
        self.ckg = graph.graph()

        current_node_index = self.insert_new_node(root_hostname)
        self.explore(current_node_index)
        self.algorithm.setup(self.ckg, k, current_node_index)

        while not self.algorithm.halt():
            self.timer_manager.start('choose_node_runtime')
            current_node_index = self.algorithm.choose_node()
            self.algorithm.current_node = current_node_index
            self.timer_manager.stop('choose_node_runtime')

            self.timer_manager.start('exploration_runtime')
            self.explore(current_node_index)
            self.timer_manager.stop('exploration_runtime')

            self.timer_manager.start('expand_runtime')
            self.algorithm.expand(current_node_index)
            self.timer_manager.stop('expand_runtime')

            self.algorithm.iteration = self.algorithm.iteration + 1
            if self.algorithm.iteration > exploration_limit:
                break
            time.sleep(10)  # pause between explorations (politeness / avoid blocking)
        logging.info("Algorithm halted! [%s]" % self.algorithm.done)
        self.timer_manager.stop('total_runtime')

    def explore(self, node_index):
        ''' Explore a host: download its page, parse it and update the CKG. '''
        hostname = self.nodes[node_index]
        links = self.process_host(hostname)
        self.update(self.ckg, node_index, links)
        logging.info("Explored Node %d:%s" % (node_index, self.node_to_title[hostname]))

    def process_host(self, hostname):
        ''' Download the web page, parse it and return the list of relevant
        ("cites=") links found on it. '''
        self.urlHandler.processUrl(hostname)
        content = self.urlHandler.getSite()
        self.extractor.setSite(hostname, content)
        self.extractor.extract()

        # Extract title (MAINLY FOR ESTHETICAL REASONS); the root page is a
        # query-results page with no paper title, so use the URL itself.
        if self.root_hostname != hostname:
            self.node_to_title[hostname] = self.extract_title(self.extractor.getRawContent())
        else:
            self.node_to_title[hostname] = hostname

        # Keep only links that match the citation-link pattern
        links = self.extractor.getLinks()
        relevant_links = []
        for link in links['regular']:
            if self.filter_pattern.match(link) is not None:
                relevant_links.append(link)
        return relevant_links

    def update(self, ckg, node_index, links):
        '''
        Update the CKG with the new links of node_index (just explored).
        ckg - The currently known graph
        node_index - The index of the node that was just explored
        links - The URLs of the links that were just discovered in the exploration
        '''
        for link in links:
            # If link is new - add it (was dict.has_key, removed in Python 3)
            if link not in self.node_to_index:
                link_index = self.insert_new_node(link)
            else:
                link_index = self.node_to_index[link]
            ckg.add_edge(node_index, link_index)

    def extract_title(self, site_content):
        ''' Extract the title of the paper from a google scholar citation page.
        Returns a placeholder when the page carries no title assignment
        (BUGFIX: previously crashed with AttributeError on match=None). '''
        match = self.title_pattern.search(site_content)
        if match is None:
            return "<unknown title>"
        return match.group('itemname')

    def stats(self):
        ''' Return a summary dict of the finished search: the clique found and
        the number of iterations used. '''
        record = dict()
        record['clique'] = self.algorithm.output_clique()
        record['iterations'] = self.algorithm.iteration
        return record
    
def _load_old_configurations(result_file_name):
    ''' Parse an existing results file and return the list of
    (root, k, iteration, algorithm-name) tuples already recorded, so that
    completed configurations are not re-run after a restart. '''
    old_configurations = []
    in_file = open(result_file_name, 'r')
    header_line = True
    for line in in_file:
        if header_line:
            # First line is the column header - skip it
            header_line = False
            continue
        line_parts = line.strip().split("\t")
        root = line_parts[0].strip()
        k = int(line_parts[1].strip())
        iteration = int(line_parts[2].strip())
        algorithm = line_parts[3].strip()
        old_configurations.append((root, k, iteration, algorithm))
    in_file.close()
    return old_configurations

def _gather_timing_record(searcher):
    ''' Collect the iteration count and the accumulated timer totals from a
    finished (or interrupted) searcher into a record dict.
    BUGFIX: the original error path stored key 'exp loration_runtime' (stray
    space) and then read 'exploration_runtime', crashing with KeyError. '''
    record = dict()
    record['iterations'] = searcher.algorithm.iteration
    record['total_runtime'] = searcher.timer_manager.get_total_time('total_runtime')
    record['exploration_runtime'] = searcher.timer_manager.get_total_time('exploration_runtime')
    record['choose_node_runtime'] = searcher.timer_manager.get_total_time('choose_node_runtime')
    record['expand_runtime'] = searcher.timer_manager.get_total_time('expand_runtime')
    return record

def _write_result_row(out_file, root_page, k, iteration, clique_algorithm, record, clique, done):
    ''' Append one tab-separated result row and flush, so partial results
    survive a crash or an HTTP block. '''
    out_file.write("%s\t " % root_page)
    out_file.write("%d\t " % k)
    out_file.write("%d\t " % iteration)
    out_file.write("%s\t " % clique_algorithm)
    out_file.write("%d\t " % record['total_runtime'])
    out_file.write("%d\t " % record['iterations'])
    out_file.write("%d\t " % record['exploration_runtime'])
    out_file.write("%d\t " % record['choose_node_runtime'])
    out_file.write("%d\t " % record['expand_runtime'])
    out_file.write("%s\t" % clique)
    out_file.write("%s\n" % done)
    out_file.flush()

def main():
    ''' Run every (root page, k, iteration, algorithm) configuration not yet
    recorded in the results file, appending one row per configuration.
    Exits the process when Google Scholar blocks the crawler (HTTPError). '''
    logging.basicConfig(level=logging.DEBUG)
    logging.root.addHandler(logging.StreamHandler(sys.stdout))

    result_file_name = '%s/onlineCliqueSearch.txt' % RESOURCE_RESULTS_DIR
    if not os.path.exists(result_file_name):
        # Fresh results file - write the header row
        out_file = open(result_file_name, 'w')
        out_file.write('root\t k\t iteration\t alg\t runtime\t iterations\t explore.Runtime\t ch.Runtime\t expand.Runtime\t clique\t Success\n')
        out_file.close()
        old_configurations = []
    else:
        old_configurations = _load_old_configurations(result_file_name)
    out_file = open(result_file_name, 'a')

    algorithm_tuple = [randomCliqueSearch(), knownDegreeSearch(), cliqueStarSearch()]

    root_pages = {
             'GS-MultiAgent':'http://scholar.google.com/scholar?q=MultiAgent',
             'GS-Israel':'http://scholar.google.com/scholar?q=Israel',
             'GS-Artificial-Intelligence-Clique':'http://scholar.google.com/scholar?q=Artificial+Intelligence+Clique',
             'GS-Robot-Navigation':'http://scholar.google.com/scholar?q=Robot+Navigation',
             'GS-webgraph':'http://scholar.google.com/scholar?q=webgraph',
             'GS-Social-Graph':'http://scholar.google.com/scholar?q=Social+Graph',
             'GS-Natural-Language-Processing':'http://scholar.google.com/scholar?q=Natural+Language+Processing',
             'GS-Unknown-Graphs':'http://scholar.google.com/scholar?q=Unknown+Graphs',
             'GS-Physical-Graphs':'http://scholar.google.com/scholar?q=Physical+Graphs',
             'GS-Model-Based-Diagnosis':'http://scholar.google.com/scholar?q=Model+Based+Diagnosis',
             'GS-Man-Machine-Interface':'http://scholar.google.com/scholar?q=Man+Machine+Interface',
             'GS-Information+Systems':'http://scholar.google.com/scholar?q=Information+Systems',
             'GS-Zero-Knowledge':'http://scholar.google.com/scholar?q=Zero+Knowledge',
             'GS-Internet':'http://scholar.google.com/scholar?q=Internet',
             'GS-Heuristic-Search':'http://scholar.google.com/scholar?q=Heuristic+Search',
             'GS-Data-Mining':'http://scholar.google.com/scholar?q=Data+Mining',
             'GS-Data-Fusion':'http://scholar.google.com/scholar?q=Data+Fusion',
             'GS-Search-Clique':'http://scholar.google.com/scholar?q=Search+Clique',
             'GS-Lookahead':'http://scholar.google.com/scholar?q=Lookahead',
             'GS-Subgraph-Isomorphism':'http://scholar.google.com/scholar?q=Subgraph+Isomorphism',
             'GS-Software-Engineering':'http://scholar.google.com/scholar?q=Software+Engineering',
             'GS-Encryption':'http://scholar.google.com/scholar?q=Encryption',
             'GS-Online-Algorithms':'http://scholar.google.com/scholar?q=Online+Algorithms',
             'GS-Sublinear-Algorithms':'http://scholar.google.com/scholar?q=Sublinear+Algorithms',
             'GS-MDP':'http://scholar.google.com/scholar?q=MDP'
             }

    # Restrict results to English and request 100 results per page
    for root in root_pages:
        root_pages[root] = root_pages[root] + "&lr=lang_en&num=100"

    first_configuration = True
    exploration_limit = 100
    for root_page in root_pages:
        for k in xrange(4, 8):
            for iteration in xrange(2):  # 2 iterations to ignore caching
                for clique_algorithm in algorithm_tuple:
                    configuration = (root_page, k, iteration, clique_algorithm.__str__())
                    if configuration in old_configurations:
                        continue  # already recorded in a previous run
                    # BUGFIX: was OnlineCliqueSearch, a name that does not
                    # exist anywhere - the class is NanoToxicMain.
                    searcher = NanoToxicMain(clique_algorithm)
                    try:
                        searcher.timer_manager.create_timer('total_runtime')
                        searcher.timer_manager.create_timer('exploration_runtime')
                        searcher.timer_manager.create_timer('choose_node_runtime')
                        searcher.timer_manager.create_timer('expand_runtime')
                        logging.info("Starting %d-clique search from %s with %s !!" % (k, root_page, clique_algorithm))
                        searcher.run(root_pages[root_page], k, exploration_limit)
                        first_configuration = False
                    except urllib2.HTTPError:
                        # Google blocked the crawler; record the failure (only
                        # for the very first configuration - later ones may
                        # have partial state from previous successes) and quit.
                        logging.info("Blocked after %d nodes" % len(searcher.ckg.nodes()))
                        if first_configuration:  # Then shouldn't try again
                            record = _gather_timing_record(searcher)
                            _write_result_row(out_file, root_page, k, iteration,
                                              clique_algorithm, record, [], False)
                        out_file.close()
                        sys.exit()

                    # Gather and persist the results of this configuration
                    record = _gather_timing_record(searcher)
                    if searcher.algorithm.done:
                        clique = searcher.algorithm.output_clique()
                    else:
                        clique = []
                    _write_result_row(out_file, root_page, k, iteration,
                                      clique_algorithm, record, clique,
                                      searcher.algorithm.done)
                    # Long randomized pause (10-20 min) between configurations
                    # to reduce the chance of being blocked
                    sleep_time = (600 + random.Random().random() * 600)
                    time.sleep(sleep_time)
            
def replace_ip():
    ''' Try to obtain a fresh IP address by releasing and renewing the DHCP
    lease (Windows ipconfig), then poll a test page until an HTML response
    confirms connectivity is back. '''
    os.system("ipconfig /release")
    os.system("ipconfig /renew")
    test_page = "HTTP://www.ynet.co.il"
    connected = False
    while not connected:
        request = urllib2.Request(test_page)
        request.add_header('User_agent', 'Ugrah/0.1')  # Ugrah is not important, just copied from Orchid
        site = urllib2.urlopen(request)
        site.read()
        # Randomized wait (61-91s) before checking the response type
        pause = 61 + random.Random().random() * 30
        time.sleep(pause)
        connected = (site.headers.type == "text/html")
    logging.info("IP Replaced?")

# Guard the entry point so importing this module does not start a crawl
if __name__ == "__main__":
    main()