##from ..kernel import Khronos
##from ..support import Namespace
##
##class BruteforceOptimizer(object):
##    """Brute-force solver for problems with discrete variables. Tests out all possible 
##    combinations of the variables and returns the one with lowest objective function value."""
##    def solve(self, problem, objective, model, experiment, simulator=None):
##        if simulator is None:
##            simulator = Khronos()
##        if simulator.model is not model:
##            simulator.attach(model)
##        simulator.stack.trace.disable()
##        
##        score = {} # mapping {config->score}
##        try:
##            for config in problem.solutions():
##                print "Running configuration:", config
##                problem.apply(config, model)
##                config = tuple(config.iteritems())
##                exp = experiment()
##                simulator.run_experiment(exp)
##                result = objective(exp)
##                score[config] = result
##                print "\tObjective function =", result, "\n"
##            print "Done"
##        except KeyboardInterrupt:
##            print "Interrupted"
##        return score
##        
##class STSOptimizer(object): pass
##class SPSAOptimizer(object): pass
##class FRaceOptimizer(object): pass
##optimizer = Namespace(bruteforce=BruteforceOptimizer, 
##                      sts=STSOptimizer, 
##                      spsa=SPSAOptimizer, 
##                      frace=FRaceOptimizer)

#class FRaceOptimizer(object):
#    from random import choice
#    from time import time
#    from math import sqrt
#    from scipy.stats import chi2 as chi_square, t as student_t
#    from .utils import mk_logger, mean, INF
#    
#    def solve(self, problem, experiment, model, simulator=None):
#        if simulator is None:
#            simulator = Khronos()
#        if simulator.model is not model:
#            simulator.attach(model)
#        simulator.stack.trace.disable()
#        
#        configs = problem.combinations(model)
#        return f_race(problem.combinations(model))
#    
#
#def f_race(configs, instances, cost, alpha=0.05, max_steps=INF, verbose=True):
#    """Racing algorithm arguments:
#        configs - the initial set of possible configurations to test. In this case, each config 
#            will simply be a value of the P parameter for the PFO heuristic.
#        instances - the full pool of test instances.
#        cost - the cost function associated with a particular instance and configuration.
#            this function should run a semigreedy procedure with the given configuration, and 
#            return as cost the best (minimum) number of reshuffles obtained. the resources used 
#            for evaluating the cost of a configuration should be the same for all configurations, 
#            i.e. either run the same number of simulations, or use the same limit on CPU time per 
#            instance.
#        alpha - the significance level of the f-test. The significance level of the t-tests are 
#            equal to alpha/2.
#        max_steps - a maximum limit on the number of steps of the race."""
#    configs = set(configs)
#    instances = list(instances)
#    ranks = dict((config, []) for config in configs)
#    step = 1
#    log = mk_logger(verbose)
#    
#    log("Starting F-race with %d candidate configurations" % (len(configs),))
#    while len(configs) > 1 and step <= max_steps and len(instances) > 0:
#        log("Step %d" % (step,))
#        instance = choice(instances)
#        log("\tRunning instance %s" % (instance,))
#        # Run the configs and save the scores of this step.
#        scores = {}
#        for config in configs:
#            scores[config] = cost(config, instance)
#        # Make the Friedman test and discard suboptimal configurations.
#        suboptimal = f_test(scores, ranks, step, alpha)
#        configs.difference_update(suboptimal)
#        log("\tDropped %s" % (suboptimal,))
#        log("\t%d configurations left" % (len(configs),))
#        step += 1
#    log("Finished F-race")
#    return configs, ranks
#    
#def f_test(scores, ranks, step, alpha):
#    """Friedman statistical test. This function must return a list of configurations which should
#    be dropped by F-race in the current step, considering the results of the F-test, and, if 
#    applicable, the pairwise t-tests. If the F-test fails to reject the null-hypothesis, an empty
#    list of configurations is returned, meaning that all of them continue in the race."""
#    # Create the list of configuration ranks for this step.
#    step_ranks = mk_ranks(scores)
#    # Update the global list of configuration ranks.
#    for config, rank in step_ranks:
#        ranks[config].append(rank)
#        
#    # Calculate the test statistic T, as provided by Stutzle's paper.
#    k = step
#    n = len(scores)
#    T_up = 0.0
#    for config in scores.iterkeys():
#        T_up += (sum(ranks[config]) - k * (n+1) / 2.0) ** 2
#    T_up *= (n - 1)
#    T_down = 0.0
#    for config in scores.iterkeys():
#        for l in xrange(step):
#            T_down += (ranks[config][l] ** 2)
#    T_down -= (k * n * ((n+1) ** 2)) / 4.0
#    T = T_up / T_down
#    
#    # Now we compare the statistic T to the 1-alpha quantile of a chi-square distribution 
#    # with n-1 degrees of freedom. If T exceeds this value, the null-hypothesis is rejected 
#    # and the pairwise t-tests are made.
#    suboptimal = []
#    if T > chi_square.ppf(1 - alpha, n - 1):
#        U_max = student_t.ppf(1 - alpha / 2, n - 1)
#        best_candidate = step_ranks[0][0] # DOUBT HERE. WHAT IS THE BEST CANDIDATE?
#        for config, _ in step_ranks[1:]:
#            U_up = abs(sum(ranks[best_candidate]) - sum(ranks[config]))
#            U_down = sqrt(2 * k * (1 - T / (k * (n-1))) * T_down / ((k-1) * (n-1)))
#            U = U_up / U_down
#            if U > U_max:
#                suboptimal.append(config)
#    return suboptimal
#    
#def mk_ranks(scores):
#    """Build a list assigning a rank to each configuration according to its score. In case of 
#    tied scores, the average of the ranks that would be assigned without ties is given to each 
#    of the tied configurations.
#    Example:
#        >>> mk_ranks(dict(a=4.0, b=4.5, c=4.0, d=3.9, e=4.0))
#        [('d', 1.0), ('a', 3.0), ('c', 3.0), ('e', 3.0), ('b', 5.0)]
#        
#        >>> mk_ranks(dict(a=4.0, b=4.5, c=4.0, d=3.9,))
#        [('d', 1.0), ('a', 2.5), ('c', 2.5), ('b', 4.0)]
#    """
#    ranks = []
#    buffer = []
#    buffer_score = None
#    rank = 1
#    for config, score in sorted(scores.iteritems(), key=lambda (c, s): s):
#        if score == buffer_score:
#            # If this config is tied with the configs in the buffer, add it 
#            # to the buffer.
#            buffer.append((config, rank))
#        else:
#            # New score (higher). If the buffer contains anything, extend the ranks list, 
#            # assigning to each config the average rank that would be assigned without ties.
#            if len(buffer) > 0:
#                avg_brank = mean([brank for _, brank in buffer])
#                for bconfig, _ in buffer:
#                    ranks.append((bconfig, avg_brank))
#            # Clear the buffer and reset the buffer_score variable.
#            buffer = [(config, rank)]
#            buffer_score = score
#        rank += 1
#        
#    # Finally extend the ranks list with the final contents of the buffer.
#    if len(buffer) > 0:
#        avg_brank = mean([brank for _, brank in buffer])
#        for bconfig, _ in buffer:
#            ranks.append((bconfig, avg_brank))
#    return ranks
