import math
import random
from tqdm import tqdm
from functools import cmp_to_key
from .Crossover import Crossover
from .Mutation import Mutator
from .GenericPrompt import GeneticPrompt
from .Evaluator import Evaluator
from Logger import logger

# Tolerance for treating two floating-point scores as equal.
EPSILON = 1e-6


def float_equal(a, b):
    """Return True when *a* and *b* differ by strictly less than ``EPSILON``."""
    return -EPSILON < a - b < EPSILON


def compare_function(ind1, ind2):
    """Lexicographic comparator over two individuals' score vectors.

    Intended for use with ``functools.cmp_to_key``. Returns a negative
    number when ind1 ranks below ind2, a positive number when it ranks
    above, and 0 when every score is equal within ``EPSILON``.
    """
    assert len(ind1.scores) == len(ind2.scores)
    for s1, s2 in zip(ind1.scores, ind2.scores):
        if float_equal(s1, s2):
            continue
        return s1 - s2
    return 0


def scores_to_weight(scores):
    """Map a score vector to a selection weight from its first score only.

    Higher first scores yield larger weights; the 0.01 offset keeps the
    denominator positive when the score reaches 1.0.
    """
    fitness_gap = 1.0 - scores[0] + 0.01
    return 1.0 / fitness_gap


def scores_to_weight_v2(scores):
    """Map a score vector to a selection weight from its first two scores.

    Uses the same reciprocal-gap term as ``scores_to_weight`` for
    ``scores[0]``, adds ``scores[1]``, then applies exp(sqrt(.)) to
    sharpen the weight distribution.
    """
    reciprocal_gap = 1.0 / (1.0 - scores[0] + 0.01)
    return math.exp(math.sqrt(reciprocal_gap + scores[1]))


class GAOptimiser:
    """Genetic-algorithm optimiser for prompt templates.

    Evolves a population of ``GeneticPrompt`` individuals for
    ``max_generations`` generations using mutation, crossover and
    weighted elite selection, tracking the best individual seen so far.
    """

    def __init__(
            self,
            args,
    ):
        """Configure the optimiser from an argparse-style namespace.

        Args:
            args: object exposing ``population_size``, ``mutation_rate``,
                ``crossover_rate`` and ``max_generations`` attributes
                (plus whatever ``Mutator`` and ``Evaluator`` require).
        """
        self.population_size = args.population_size
        self.mutation_rate = args.mutation_rate
        self.crossover_rate = args.crossover_rate
        self.max_generations = args.max_generations

        # Best individual observed across all generations (set in _select).
        self.best_individual = None

        self.mutator = Mutator(args)
        self.crossover = Crossover()
        self.args = args
        self.evaluator = Evaluator(args)
        # Maps an individual's score vector to a parent-sampling weight.
        self.weight_fn = scores_to_weight_v2

    def _init_from_original(self, prompt_template):
        """Seed generation 0: the original prompt plus mutants of it."""
        first = GeneticPrompt(prompt_template, self.args)
        population = [first]
        for _ in tqdm(range(self.population_size - 1), desc="Yielding the first generation"):
            population.append(self.mutator.run(first))
        return population

    def evolve(self, prompt_template, train_data, fitness_functions):
        """Run the full GA loop and return the best individual found.

        Args:
            prompt_template: starting prompt template to evolve from.
            train_data: data forwarded to the evaluator and fitness functions.
            fitness_functions: callables ``fn(train_data, predictions)`` whose
                first returned element is appended to an individual's scores.

        Returns:
            The best ``GeneticPrompt`` seen across all generations.
        """
        population = self._init_from_original(prompt_template)
        for generation in range(self.max_generations):
            logger.info(f"Generation {generation}")
            self._evaluate(population, train_data, fitness_functions)
            elites = self._select(population)
            population = self._next_generation(elites)
            logger.info(f"Best individual: {self.best_individual.scores}")

        return self.best_individual

    def _evaluate(self, population, train_data, fitness_functions):
        """Score every unscored individual in-place.

        Individuals that already carry scores (elites carried over from a
        previous generation) are skipped to avoid redundant evaluation.
        """
        for idx, individual in tqdm(enumerate(population), desc="Evaluating individuals"):
            if len(individual.scores) > 0:
                logger.debug(f"Skip individual {idx}: {individual.scores}")
                continue
            predictions = self.evaluator.run(train_data, individual.to_prompt_template())
            for fitness_fn in fitness_functions:
                individual.scores.append(fitness_fn(train_data, predictions)[0])
            logger.debug(f"Individual {idx}: {individual.scores}")

    def _select(self, population):
        """Return the top sqrt(population_size) individuals, best first.

        Also updates ``self.best_individual`` when the current generation's
        best outranks the best seen so far.
        """
        sorted_population = sorted(population, key=cmp_to_key(compare_function), reverse=True)
        elites = sorted_population[:int(math.sqrt(self.population_size))]
        if self.best_individual is None or compare_function(self.best_individual, elites[0]) < 0:
            self.best_individual = elites[0]
        return elites

    def _next_generation(self, elites):
        """Breed the next population of exactly ``population_size`` from the elites.

        The best elite is carried over unchanged (elitism); the remainder is
        produced by fitness-weighted parent sampling followed by optional
        crossover and per-child mutation.
        """
        probs = [self.weight_fn(ind.scores) for ind in elites]
        new_population = [elites[0]]
        while len(new_population) < self.population_size:
            # NOTE: choices() samples with replacement, so both parents may
            # be the same individual.
            parent1, parent2 = random.choices(elites, weights=probs, k=2)
            child1, child2 = parent1, parent2
            if random.random() < self.crossover_rate:
                child1, child2 = self.crossover.run(child1, child2)
            if random.random() < self.mutation_rate:
                child1 = self.mutator.run(child1)
            if random.random() < self.mutation_rate:
                child2 = self.mutator.run(child2)
            new_population.append(child1)
            new_population.append(child2)
        # Children are appended in pairs after the single carried-over elite,
        # so an even population_size would otherwise overshoot by one; trim
        # back to the configured size.
        return new_population[:self.population_size]
