'''
Created on May 11, 2010

@author: roni
'''
import logging
from projectConstants import ProjectConstants
from generators.randomScenario import RandomScenario
import os
import random
from ns2Scenario import Ns2Scenario
from experimentRunner import ExperimentRunner
from traceParser import TraceParser
from diagnosers.linearProgrammingDiagnoser import LinearProgrammingDiagnoser
from gde import GDE
from diagnosers.minimalCardinalityGde import MinimalCardinalityGde
from diagnosisEnvironment import DiagnosisEnvironment
from diagnosers.linearProgrammingGde import LinearProgrammingGde
from diagnosers.sherlock import Sherlock
import time

DELIMITER = ","

def choose_abnormals(scenario, normal_probability):
    ''' Add random delays to nodes on agents paths. At least one node will be added,
    other nodes on paths will be added according to the abnormal probability (i.i.d) '''
    # Candidate abnormals: every node that lies on some agent's path...
    candidates = set()
    for agent in scenario.agents:
        candidates.update(scenario.agent_to_path[agent])

    # ...excluding nodes in the agents' flows (current GDE focuses on path abnormals)
    for agent in scenario.agents:
        candidates.difference_update(scenario.agent_to_flow[agent])

    delay_ms = Ns2Scenario.LINK_DELAY * 1000
    node_to_delay = dict()
    candidates = list(candidates)

    # Guarantee at least one abnormal node by forcing a random candidate
    forced = candidates.pop(random.randint(0, len(candidates) - 1))
    node_to_delay[forced] = delay_ms
    logging.info("Added %d ms delay to node %s" % (delay_ms, forced))

    # Remaining candidates become abnormal i.i.d. with probability (1 - normal_probability)
    for candidate in candidates:
        if random.random() > normal_probability:
            node_to_delay[candidate] = delay_ms
            logging.info("Added %d ms delay to node %s" % (delay_ms, candidate))

    return node_to_delay

    
def load_previous_results(results_file_name, APPEND_RESULTS, delimiter=","):
    ''' Prepare the results file and return the set of previously-run configurations.

    results_file_name -- path to the CSV results file (previously this argument was
        ignored and a hard-coded path was used instead; it is now honored).
    APPEND_RESULTS -- if False, an existing results file is moved aside to a
        numbered ".backupN" file and an empty set is returned; if True, the file
        (when present) is parsed and its configurations are returned.
    delimiter -- CSV field separator; defaults to "," (the module DELIMITER value).

    Returns a set of (nodes, added_edges, agents, normal_probability, iteration)
    tuples parsed from the first five columns of each data row.
    '''
    configurations = set()
    if not APPEND_RESULTS:
        if os.path.exists(results_file_name):
            # Back up old results instead of overwriting: find the first free
            # ".backupN" name derived from the results file itself.
            base, extension = os.path.splitext(results_file_name)
            backup_index = 0
            backup_file_name = "%s.backup%d%s" % (base, backup_index, extension)
            while os.path.exists(backup_file_name):
                backup_index += 1
                backup_file_name = "%s.backup%d%s" % (base, backup_index, extension)
            os.rename(results_file_name, backup_file_name)
    else:
        # Load old results so already-run configurations can be skipped
        if os.path.exists(results_file_name):
            results_file = open(results_file_name, 'r')
            try:
                header = results_file.readline()
                if len(header) > 0:
                    for line in results_file:
                        parts = line.split(delimiter)
                        # Only the first five columns identify a configuration;
                        # any further columns (algorithm, metrics) are ignored.
                        configuration = (int(parts[0].strip()), int(parts[1].strip()),
                                         int(parts[2].strip()), float(parts[3].strip()),
                                         int(parts[4].strip()))
                        configurations.add(configuration)
            finally:
                results_file.close()
    return configurations

def is_old_configuration(new_configuration, configurations):
    ''' Return True if new_configuration matches one of the stored configurations.

    Integer-valued entries must match exactly; fractional entries (e.g. the
    normal-probability parameter, which suffers float round-trip through the CSV)
    match within an absolute tolerance of 0.001.

    Note: the original implementation used xrange(), which does not exist on
    Python 3; this version iterates with zip() and behaves identically.
    '''
    for configuration in configurations:
        if configuration == new_configuration:
            return True
        for old_value, new_value in zip(configuration, new_configuration):
            # If the stored value is integer-valued, require an exact match
            if int(old_value) == old_value:
                if old_value != new_value:
                    break
            elif abs(old_value - new_value) > 0.001:
                break
        else:
            # No element differed beyond tolerance -> same configuration
            return True
    return False

def run_instance(engine, loaded_environment, configuration, results_file, experiment_runner=None):
    ''' Runs an engine on an instance of the problem, and output the results.

    engine -- diagnosis engine; must expose .name and be accepted by diagnose().
    loaded_environment -- DiagnosisEnvironment with scenario, observations and
        the injected delays.
    configuration -- experiment-parameter tuple, written as the CSV row prefix.
    results_file -- open writable file object; flushed after every row.
    experiment_runner -- optional ExperimentRunner. When None (the historical
        call style), falls back to the module-level `runner` global created in
        the __main__ block; previously this global dependency was implicit and
        the function raised NameError when used outside the script.
    '''
    if experiment_runner is None:
        # Backward compatible: rely on the global set up in __main__.
        experiment_runner = runner
    logging.info("Running (%s) ..." % engine.name)
    results = experiment_runner.diagnose(engine, loaded_environment.scenario,
         loaded_environment.observations.flow_to_details,
         loaded_environment.added_delays, verify=False)
    logging.info("Success (%s)! outputting results..." % engine.name)
    # Row layout: configuration parameters, engine name, then the result metrics
    for parameter in configuration:
        results_file.write("%s%s" % (parameter, DELIMITER))
    results_file.write("%s%s" % (engine.name, DELIMITER))
    for result in results:
        results_file.write("%s%s" % (result, DELIMITER))
    results_file.write("\n")
    logging.info("Flushing to file...")
    # Flush so partial results survive a crash of a later engine/instance
    results_file.flush()

def run_instance_from_file(engine, file_prefix):
    ''' Convenience function for debug. Receives environment file and an engine, and runs it.

    engine -- diagnosis engine to run.
    file_prefix -- environment file name (without directory) under
        <RESROUCE_DIR>/environments.

    Returns the diagnose() results (previously they were computed and discarded,
    which made the helper useless for inspection in a debugger/REPL).
    '''
    environment_dir = "%s/environments" % ProjectConstants.RESROUCE_DIR
    full_file_prefix = "%s/%s" % (environment_dir, file_prefix)
    logging.info("Loading %s..." % file_prefix)
    loaded_environment = DiagnosisEnvironment(full_file_prefix)
    logging.info("Running %s [%s]..." % (file_prefix, time.ctime()))
    runner = ExperimentRunner()
    return runner.diagnose(engine, loaded_environment.scenario,
         loaded_environment.observations.flow_to_details,
         loaded_environment.added_delays, verify=False)
        

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # APPEND_RESULTS: skip configurations already present in the results file.
    # APPEND_FILE: append rows to an existing file instead of writing a fresh header.
    APPEND_RESULTS = False
    APPEND_FILE = True

    results_file_name = "%s/AllResults.csv" % ProjectConstants.RESROUCE_DIR
    configurations = load_previous_results(results_file_name, APPEND_RESULTS)

    # Start a new results file with a header row unless appending to an existing one.
    # (open() replaces the Python-2-only file() builtin; same behavior.)
    if (not APPEND_RESULTS and not APPEND_FILE) or not os.path.exists(results_file_name):
        header_columns = ["Nodes", "AddedEdges", "Agents", "Ab.Prob", "Iteration",
                          "Algorithm", "#Diagnosises", "TotalRuntime",
                          "MinCardinality", "Stopped"]
        results_file = open(results_file_name, 'w')
        results_file.write(DELIMITER.join(header_columns) + "\n")
        results_file.close()

    engines = [GDE(False), GDE(True), MinimalCardinalityGde(), LinearProgrammingGde(),
               Sherlock(True), Sherlock(False)]

    # Experiment grid: every combination below is one environment instance
    node_range = [300, 700, 900, 500, 1000]
    added_edges_range = [2]
    agents_range = [5, 15, 50, 60, 70]
    normal_probability_range = [0.95, 0.8]
    iterations = range(50)  # range (not xrange) so the script also runs on Python 3

    environment_dir = "%s/environments" % ProjectConstants.RESROUCE_DIR
    results_file = open(results_file_name, 'a')
    runner = ExperimentRunner()  # module-level global; run_instance relies on it
    try:
        for nodes in node_range:
            for added_edges in added_edges_range:
                for agents in agents_range:
                    for normal_probability in normal_probability_range:
                        for iteration in iterations:
                            configuration = (nodes, added_edges, agents, normal_probability, iteration)
                            if APPEND_RESULTS and is_old_configuration(configuration, configurations):
                                logging.info("Skipped old configuration Env-%s.%s.%s.%3.2f.%s" % (nodes, added_edges, agents, normal_probability, iteration))
                                continue
                            file_prefix = "Env-%s.%s.%s.%3.2f.%s" % (nodes, added_edges, agents, normal_probability, iteration)
                            full_file_prefix = "%s/%s" % (environment_dir, file_prefix)
                            logging.info("Loading %s..." % file_prefix)
                            loaded_environment = DiagnosisEnvironment(full_file_prefix)
                            logging.info("Running %s [%s]..." % (file_prefix, time.ctime()))
                            # Explicit loop instead of map(lambda ...): map() is lazy on
                            # Python 3, so the original would silently run nothing there.
                            for engine in engines:
                                run_instance(engine, loaded_environment, configuration, results_file)
    finally:
        # Close even if an instance crashes; rows are flushed per engine anyway
        results_file.close()
    
    
    
      
