import numpy as np
from .base_optimizer import BaseOptimizer

class SimpleMLPAgent:
    """Tiny two-layer MLP policy (tanh hidden layer, softmax output) trained
    with single-step REINFORCE-style policy-gradient updates.

    Parameters
    ----------
    state_dim : int
        Dimensionality of the input state vector.
    action_dim : int
        Number of discrete actions (softmax output size).
    hidden_dim : int
        Hidden layer width.
    lr : float
        Learning rate for the gradient-ascent update.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=32, lr=0.01):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.lr = lr
        # Small random init keeps the initial softmax close to uniform.
        self.W1 = np.random.randn(state_dim, hidden_dim) * 0.1
        self.b1 = np.zeros((1, hidden_dim))
        self.W2 = np.random.randn(hidden_dim, action_dim) * 0.1
        self.b2 = np.zeros((1, action_dim))

    def forward(self, state):
        """Run the network on a batch of states.

        Returns
        -------
        (probs, a1) : tuple of np.ndarray
            probs: (batch, action_dim) action probabilities (rows sum to 1).
            a1: (batch, hidden_dim) hidden activations, needed by `update`.
        """
        z1 = state @ self.W1 + self.b1
        a1 = np.tanh(z1)
        z2 = a1 @ self.W2 + self.b2
        # Numerically stable softmax: subtract the per-row max before exp.
        exp_logits = np.exp(z2 - np.max(z2, axis=1, keepdims=True))
        probs = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
        return probs, a1

    def sample_action(self, state):
        """Sample one action per batch row from the policy distribution."""
        probs, _ = self.forward(state)
        action = np.array([np.random.choice(len(p), p=p) for p in probs])
        return action, probs

    def update(self, state, a1, action, probs, reward):
        """Single-step policy-gradient (REINFORCE) update.

        For a softmax policy the gradient of log pi(a|s) w.r.t. the logits is
        ``one_hot(a) - probs``.  (The previous code scaled the one-hot by
        1/probs — the gradient w.r.t. the *probability*, not the logits —
        which wrongly amplified updates for low-probability actions.)
        """
        batch_idx = np.arange(len(action))
        one_hot = np.zeros_like(probs)
        one_hot[batch_idx, action] = 1.0
        # d log pi / d logits, weighted by the (broadcast) scalar reward.
        dz2 = (one_hot - probs) * reward[:, None]
        dW2 = a1.T @ dz2
        db2 = np.sum(dz2, axis=0, keepdims=True)
        da1 = dz2 @ self.W2.T
        dz1 = da1 * (1 - a1 ** 2)  # tanh derivative
        dW1 = state.T @ dz1
        db1 = np.sum(dz1, axis=0, keepdims=True)
        # Gradient ASCENT on expected reward (hence +=, not -=).
        self.W1 += self.lr * dW1
        self.b1 += self.lr * db1
        self.W2 += self.lr * dW2
        self.b2 += self.lr * db2

class RLDE(BaseOptimizer):
    """Differential Evolution whose F/CR parameters are selected per step by
    a small policy network trained online with policy-gradient updates.

    Parameters
    ----------
    dim : int
        Problem dimensionality.
    pop_size, max_iter : int
        DE population size and number of generations.
    state_dim : int or None
        Input size of the policy network.  The state built by `get_state`
        is ``[mean(population) (dim values), mean(fitness), best_fitness]``,
        i.e. ``dim + 2`` features, so when None (the default) it is derived
        automatically.  (The old fixed default of 64 crashed in the agent's
        forward pass for any ``dim != 62``.)
    action_dim : int
        Number of discrete F/CR settings the agent can choose from.
    lr : float
        Agent learning rate.
    """

    def __init__(self, dim, pop_size=50, max_iter=1000, state_dim=None, action_dim=3, lr=0.01):
        super().__init__(dim, pop_size, max_iter)
        # Derive the state size from the actual feature vector unless the
        # caller explicitly overrides it.
        self.state_dim = dim + 2 if state_dim is None else state_dim
        self.action_dim = action_dim
        self.agent = SimpleMLPAgent(self.state_dim, action_dim, hidden_dim=32, lr=lr)
        self.F_range = [0.1, 0.9]
        self.CR_range = [0.1, 0.9]

    def get_state(self, population, fitness, best_fitness):
        """Build the (1, dim + 2) state: population mean concatenated with
        the mean fitness and the best fitness seen so far."""
        state = np.concatenate([
            np.mean(population, axis=0),
            [np.mean(fitness)],
            [best_fitness]
        ])
        return state.reshape(1, -1)

    def optimize(self, objective_func, bounds=(-5, 5), model=None):
        """Minimize `objective_func` over `bounds`; returns (best_solution,
        best_fitness).  `model` is stored for API compatibility but unused
        here."""
        self.model = model
        population = self.initialize_population(bounds)
        fitness = np.array([objective_func(ind) for ind in population])
        best_idx = np.argmin(fitness)
        self.best_solution = population[best_idx].copy()
        self.best_fitness = fitness[best_idx]
        for gen in range(self.max_iter):
            for i in range(self.pop_size):
                state = self.get_state(population, fitness, self.best_fitness)
                action, probs = self.agent.sample_action(state)
                action = action[0]
                # Map the discrete action {0..action_dim-1} linearly onto
                # the F and CR ranges.
                frac = action / (self.action_dim - 1)
                F = self.F_range[0] + (self.F_range[1] - self.F_range[0]) * frac
                CR = self.CR_range[0] + (self.CR_range[1] - self.CR_range[0]) * frac
                # DE/rand/1 mutation from three distinct donors != i.
                idxs = [idx for idx in range(self.pop_size) if idx != i]
                a, b, c = population[np.random.choice(idxs, 3, replace=False)]
                mutant = a + F * (b - c)
                mutant = self.clip_to_bounds(mutant, bounds)
                # Binomial crossover; force at least one gene from the mutant.
                cross_points = np.random.random(self.dim) < CR
                if not np.any(cross_points):
                    cross_points[np.random.randint(0, self.dim)] = True
                trial = np.where(cross_points, mutant, population[i])
                f = objective_func(trial)
                # Reward = relative fitness improvement of the trial vector.
                reward = np.array([(fitness[i] - f) / (abs(fitness[i]) + 1e-10)])
                # Recompute hidden activations for the backward pass, then
                # update the policy network.
                _, a1 = self.agent.forward(state)
                self.agent.update(state, a1, np.array([action]), probs, reward)
                # Greedy selection: keep the trial only if it improves.
                if f < fitness[i]:
                    fitness[i] = f
                    population[i] = trial
                    if f < self.best_fitness:
                        self.best_fitness = f
                        self.best_solution = trial.copy()
        return self.best_solution, self.best_fitness