import numpy as np
import torch
from torch import nn
from torch import optim
from sklearn.utils import check_random_state
import os
import matplotlib.pyplot as plt

class SimpleNN(nn.Module):
    """Minimal two-layer feed-forward network: Linear -> sigmoid -> Linear."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # Attribute names are kept stable: crossover code and saved
        # state_dicts rely on the parameter ordering they imply.
        self.input_hidden = nn.Linear(input_size, hidden_size)
        self.hidden_output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Sigmoid activation on the hidden layer, linear output layer.
        return self.hidden_output(torch.sigmoid(self.input_hidden(x)))

class GeneticAlgorithmBP:
    """Hybrid genetic-algorithm / backpropagation trainer for SimpleNN models.

    Each generation evolves a population of networks (roulette-wheel
    selection, single-point crossover, uniform-noise mutation) and then
    fine-tunes the fittest individual with one SGD step.
    """

    def __init__(self, num_inputs, num_hidden, num_outputs, population_size, mutation_rate, learning_rate, decay_rate, device):
        self.num_inputs = num_inputs
        self.num_hidden = num_hidden
        self.num_outputs = num_outputs
        self.learning_rate = learning_rate   # initial SGD learning rate
        self.decay_rate = decay_rate         # per-generation multiplicative LR decay
        self.device = device

        self.population = []
        self.population_size = population_size   # number of individuals per generation
        self.mutation_rate = mutation_rate       # per-parameter-tensor mutation probability
        self.fitness_values = np.zeros(population_size)   # fitness of each individual
        self.random_state = check_random_state(None)      # single RNG for all GA randomness

    def initialize_population(self):
        """Fill the population with freshly initialized SimpleNN models."""
        for _ in range(self.population_size):
            model = SimpleNN(self.num_inputs, self.num_hidden, self.num_outputs).to(self.device)
            self.population.append(model)

    def evaluate_fitness(self, x, y):
        """Set fitness_values[i] = 1 / (MSE_i + eps) for every model."""
        for i in range(self.population_size):
            model = self.population[i]
            model.eval()
            with torch.no_grad():
                output_activations = model(x)
                error = torch.mean((output_activations - y) ** 2).item()
                # Lower error -> higher fitness; eps guards against division by zero.
                self.fitness_values[i] = 1 / (error + 1e-8)

    def selection(self):
        """Fitness-proportionate (roulette-wheel) resampling of the population.

        Also reorders fitness_values with the same indices so that fitness
        stays aligned with the resampled population — previously the two
        arrays went out of sync and required an extra evaluation pass.
        """
        selection_probs = self.fitness_values / np.sum(self.fitness_values)
        selected_indices = self.random_state.choice(np.arange(self.population_size), size=self.population_size, replace=True, p=selection_probs)
        self.population = [self.population[idx] for idx in selected_indices]
        self.fitness_values = self.fitness_values[selected_indices]

    def crossover(self):
        """Replace the population with offspring built by single-point crossover."""
        offspring_population = []
        selection_probs = self.fitness_values / np.sum(self.fitness_values)
        for _ in range(self.population_size):
            parent1_idx = self.random_state.choice(np.arange(self.population_size), p=selection_probs)
            parent2_idx = self.random_state.choice(np.arange(self.population_size), p=selection_probs)
            parent1 = self.population[parent1_idx]
            parent2 = self.population[parent2_idx]

            child = SimpleNN(self.num_inputs, self.num_hidden, self.num_outputs).to(self.device)
            with torch.no_grad():
                for param1, param2, param_child in zip(parent1.parameters(), parent2.parameters(), child.parameters()):
                    # First `crossover_point` entries come from parent1, the rest from parent2.
                    crossover_point = self.random_state.randint(0, param1.numel())
                    param_child.data.view(-1)[:crossover_point] = param1.data.view(-1)[:crossover_point]
                    param_child.data.view(-1)[crossover_point:] = param2.data.view(-1)[crossover_point:]
            offspring_population.append(child)
        self.population = offspring_population

    def mutation(self):
        """With probability mutation_rate, add uniform noise in [-0.1, 0.1) to a parameter tensor."""
        for model in self.population:
            with torch.no_grad():
                for param in model.parameters():
                    if self.random_state.rand() < self.mutation_rate:
                        # Draw noise from the shared random_state (not the global
                        # np.random) so all GA randomness is reproducible from one source.
                        noise = self.random_state.uniform(low=-0.1, high=0.1, size=param.data.shape)
                        param.data += torch.tensor(noise, device=self.device, dtype=torch.float32)

    def get_learning_rate(self, generation):
        """Return the exponentially decayed learning rate for `generation`."""
        return self.learning_rate * (self.decay_rate ** generation)

    def train(self, x, y, num_generations, target_error):
        """Evolve the population for up to num_generations, fine-tuning the
        fittest model with one SGD step per generation.

        Stops early once the SGD loss reaches target_error. Plots the loss
        curve, saves the best model's state_dict, and returns the best model.
        """
        self.initialize_population()

        x = x.to(self.device)
        y = y.to(self.device)

        loss_curve = []

        for generation in range(num_generations):
            self.evaluate_fitness(x, y)
            self.selection()
            self.crossover()
            self.mutation()

            # Crossover/mutation replaced the population, so the previous
            # fitness values no longer describe these models — re-evaluate
            # before picking the best index (fixes a stale-index bug where
            # argmax referred to the pre-crossover population).
            self.evaluate_fitness(x, y)
            best_index = np.argmax(self.fitness_values)
            model = self.population[best_index]

            # One backpropagation (SGD) step on the fittest individual.
            model.train()
            current_lr = self.get_learning_rate(generation)

            optimizer = optim.SGD(model.parameters(), lr=current_lr)
            optimizer.zero_grad()

            output_activations = model(x)
            loss = torch.mean((output_activations - y) ** 2)
            loss.backward()
            optimizer.step()
            error = loss.item()

            loss_curve.append(error)

            print(f"Generation {generation}: Loss = {error}, Learning Rate = {current_lr}")

            if error <= target_error:
                print(f"Target error reached: {error} at generation {generation}")
                break

        # The final SGD step changed one model after the last evaluation;
        # re-evaluate so the returned model really is the current best.
        self.evaluate_fitness(x, y)
        best_model = self.population[np.argmax(self.fitness_values)]

        plt.plot(loss_curve)
        plt.xlabel('Generation')
        plt.ylabel('Mean Squared Error (MSE)')
        plt.title('MSE vs. Generation')
        plt.grid(True)
        plt.show()

        torch.save(best_model.state_dict(), 'trained of H_model.pth')

        return best_model

def load_data(filename):
    """Load a comma-delimited dataset.

    Columns 1-2 become the feature matrix; the last column becomes the
    target, reshaped to a column vector.
    """
    raw = np.loadtxt(filename, delimiter=',')
    features = raw[:, 1:3]
    targets = raw[:, -1].reshape(-1, 1)
    return features, targets

if __name__ == '__main__':
    # Network architecture and GA/BP hyper-parameters.
    num_inputs = 2
    num_hidden = 7
    num_outputs = 1
    population_size = 100
    mutation_rate = 0.2
    learning_rate = 0.1
    decay_rate = 0.99
    num_generations = 500
    target_error = 0.00003

    # Prefer the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    ga_bp = GeneticAlgorithmBP(num_inputs, num_hidden, num_outputs, population_size, mutation_rate, learning_rate, decay_rate, device)

    # Load the training set and move it to the chosen device as float32.
    x_train, y_train = load_data('train of H_dataset.txt')
    x_train = torch.tensor(x_train, dtype=torch.float32).to(device)
    y_train = torch.tensor(y_train, dtype=torch.float32).to(device)

    model = ga_bp.train(x_train, y_train, num_generations, target_error)


