from pprint import pprint
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Tuple, Dict
import gym
from gym import spaces
from gplearn.genetic import SymbolicRegressor
from gplearn.fitness import make_fitness
import pandas as pd
from sklearn.metrics import mean_squared_error

class Nguyen2Dataset:
    """Fixed synthetic regression dataset used to score each gplearn fit.

    NOTE(review): the target here is x^2 + x + 1; the canonical Nguyen-2
    benchmark is usually cited as x^4 + x^3 + x^2 + x — confirm which
    polynomial is actually intended.
    """

    def __init__(self):
        # 100 evenly spaced points on [-1, 1], shaped (100, 1) for sklearn-style fit().
        self.X = np.linspace(-1, 1, 100).reshape(-1, 1)
        # Flatten the target to 1-D: sklearn estimators expect y of shape
        # (n_samples,) and emit a DataConversionWarning for column vectors.
        self.y = (self.X ** 2 + self.X + 1).ravel()

class ParamOptimizationEnv(gym.Env):
    """Gym environment for tuning gplearn genetic-operator probabilities.

    Action: a vector of `param_dim` values in [0, 1], normalized into
    (p_crossover, p_subtree_mutation, p_hoist_mutation, p_point_mutation).
    Reward: negative MSE of the resulting symbolic-regression fit.
    Episode ends when MSE < 0.01 or after `max_steps` steps.
    """

    def __init__(self, param_dim: int = 4, max_steps: int = 10):
        super().__init__()
        self.param_dim = param_dim
        # Hard cap on episode length; without it the episode only ends when
        # the MSE threshold is reached, which may never happen.
        self.max_steps = max_steps
        self.step_count = 0

        # Action space: the raw (pre-normalization) operator probabilities.
        self.action_space = spaces.Box(
            low=0.0,
            high=1.0,
            shape=(param_dim,),
            dtype=np.float32
        )

        # Observation space: current MSE followed by the current parameters.
        self.observation_space = spaces.Box(
            low=-np.inf,
            high=np.inf,
            shape=(param_dim + 1,),  # MSE + parameters
            dtype=np.float32
        )

        self.current_mse = None
        self.current_params = None

        # Fixed dataset every candidate parameterization is scored on.
        self.dataset = Nguyen2Dataset()

        # gplearn evaluator; its operator probabilities are overwritten
        # before every fit in step().
        self.evaluator = SymbolicRegressor(
            population_size=500,
            generations=20,
            stopping_criteria=0.01,
            p_crossover=0.7,
            p_subtree_mutation=0.1,
            p_hoist_mutation=0.05,
            p_point_mutation=0.1,
            max_samples=0.9,
            verbose=0,
            random_state=42
        )

    def _normalize(self, params: np.ndarray) -> np.ndarray:
        """Clip to [0, 1] and rescale so the probabilities sum to just under 1.

        gplearn raises if the four operator probabilities sum above 1, and
        floating-point division can leave the normalized sum a hair over 1,
        so the result is shrunk by a tiny epsilon. An all-zero vector falls
        back to the uniform distribution instead of dividing by zero (the
        previous code produced NaNs in that case).
        """
        params = np.clip(np.asarray(params, dtype=np.float64), 0.0, 1.0)
        total = params.sum()
        if total <= 0.0:
            return np.full(self.param_dim, 1.0 / self.param_dim)
        return params / total * (1.0 - 1e-9)

    def reset(self):
        """Start a new episode with random normalized parameters."""
        self.current_params = self._normalize(
            np.random.uniform(0, 1, self.param_dim)
        )
        self.current_mse = 1.0  # placeholder MSE before the first fit
        self.step_count = 0
        return self._get_observation()

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, Dict]:
        """Apply `action` as operator probabilities, refit gplearn, and score."""
        self.step_count += 1
        self.current_params = self._normalize(action)

        # Push the chosen probabilities into the gplearn evaluator.
        self.evaluator.p_crossover = self.current_params[0]
        self.evaluator.p_subtree_mutation = self.current_params[1]
        self.evaluator.p_hoist_mutation = self.current_params[2]
        self.evaluator.p_point_mutation = self.current_params[3]

        # Fit and score; a failed fit is treated as worst-case MSE so a bad
        # parameter draw does not abort training.
        try:
            self.evaluator.fit(self.dataset.X, self.dataset.y)
            y_pred = self.evaluator.predict(self.dataset.X)
            new_mse = mean_squared_error(self.dataset.y, y_pred)
        except Exception as e:
            print(f"训练出错: {str(e)}")
            new_mse = 1.0  # large fallback MSE on failure

        # Reward is negative MSE (we are minimizing MSE).
        reward = -new_mse

        self.current_mse = new_mse

        # Episode ends on success or when the step budget is exhausted.
        done = new_mse < 0.01 or self.step_count >= self.max_steps

        return self._get_observation(), reward, done, {}

    def _get_observation(self) -> np.ndarray:
        """Observation = [current MSE, *current parameters]."""
        return np.concatenate([[self.current_mse], self.current_params])

class PPOMemory:
    """Flat rollout buffer for PPO trajectories (one Python list per field)."""

    def __init__(self):
        # Delegate to clear() so the empty-state definition lives in one
        # place instead of being duplicated.
        self.clear()

    def clear(self):
        """Reset every trajectory list to empty."""
        self.states = []
        self.actions = []
        self.probs = []
        self.vals = []
        self.rewards = []
        self.dones = []

class ActorNetwork(nn.Module):
    """MLP policy head mapping a state vector to per-action values in (0, 1).

    The trailing Sigmoid keeps every output strictly between 0 and 1, which
    the agent consumes as Beta-distribution parameters.
    """

    def __init__(self, input_dims: int, n_actions: int):
        super().__init__()
        hidden = 64
        layers = [
            nn.Linear(input_dims, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_actions),
            nn.Sigmoid(),
        ]
        self.actor = nn.Sequential(*layers)

    def forward(self, state):
        """Run `state` through the policy MLP and return the (0, 1) outputs."""
        return self.actor(state)

class CriticNetwork(nn.Module):
    """MLP value head mapping a state vector to a single state-value estimate."""

    def __init__(self, input_dims: int):
        super().__init__()
        hidden = 64
        layers = [
            nn.Linear(input_dims, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 1),
        ]
        self.critic = nn.Sequential(*layers)

    def forward(self, state):
        """Return the scalar value estimate (shape (..., 1)) for `state`."""
        return self.critic(state)

class PPOAgent:
    """Clipped-PPO agent with a Beta policy over actions in [0, 1]^n.

    Fixes relative to the original implementation:
      * choose_action samples under torch.no_grad(), so the rollout buffer
        holds graph-free tensors and learn() can call backward() in every
        batch/epoch without re-traversing (already freed) stale graphs.
      * Per-dimension Beta log-probs are summed into one joint log-prob and
        critic outputs are squeezed to 1-D, so every term in the loss has
        shape (batch,); the original mixed (batch, n_actions), (batch, 1)
        and (batch,) tensors, which broadcast incorrectly.
      * GAE runs in a single O(T) backward pass (was O(T^2)) and the trace
        is cut at terminal steps.
      * The entropy bonus is subtracted from the loss (encouraging
        exploration); the original double-negated it and penalized entropy.
    """

    # Keeps Beta concentration parameters strictly positive.
    _EPS = 1e-6

    def __init__(self, input_dims: int, n_actions: int, lr: float = 0.0003,
                 gamma: float = 0.99, gae_lambda: float = 0.95,
                 policy_clip: float = 0.2, batch_size: int = 64,
                 n_epochs: int = 10):
        self.gamma = gamma
        self.policy_clip = policy_clip
        self.n_epochs = n_epochs
        self.gae_lambda = gae_lambda
        self.batch_size = batch_size
        self.clip_ratio = policy_clip

        self.actor = ActorNetwork(input_dims, n_actions)
        self.critic = CriticNetwork(input_dims)

        # Single optimizer over both networks.
        self.optimizer = torch.optim.Adam([
            {'params': self.actor.parameters(), 'lr': lr},
            {'params': self.critic.parameters(), 'lr': lr}
        ])

        self.memory = PPOMemory()

    def _beta_params(self, raw: torch.Tensor):
        """Map sigmoid outputs in (0, 1) to strictly positive Beta (alpha, beta).

        Clamping keeps both concentrations >= _EPS even when the network
        saturates near 0 or 1, where Beta(0, x) would be invalid.
        """
        p = raw.clamp(self._EPS, 1.0 - self._EPS)
        return p, 1.0 - p

    def choose_action(self, observation) -> np.ndarray:
        """Sample an action for `observation` and record the transition.

        Returns a numpy vector in (0, 1)^n_actions. The caller is expected
        to append the resulting reward/done flags to `self.memory` before
        the next call to learn().
        """
        state = torch.tensor(np.array([observation]), dtype=torch.float)
        with torch.no_grad():  # rollout data must not carry autograd graphs
            alpha, beta = self._beta_params(self.actor(state))
            dist = torch.distributions.Beta(alpha, beta)
            action = dist.sample()
            log_prob = dist.log_prob(action).sum(dim=-1)  # joint log-prob, (1,)
            value = self.critic(state).squeeze(-1)        # (1,)

        self.memory.states.append(observation)
        self.memory.actions.append(action)
        self.memory.probs.append(log_prob)
        self.memory.vals.append(value)

        return action.numpy()[0]

    def _gae(self, rewards: np.ndarray, values: np.ndarray,
             dones: np.ndarray) -> np.ndarray:
        """Generalized Advantage Estimation in one O(T) backward pass."""
        advantages = np.zeros(len(rewards), dtype=np.float32)
        running = 0.0
        for t in reversed(range(len(rewards))):
            # Bootstrap value is 0 past the end of the rollout.
            next_value = values[t + 1] if t + 1 < len(rewards) else 0.0
            nonterminal = 1.0 - dones[t]
            delta = rewards[t] + self.gamma * next_value * nonterminal - values[t]
            running = delta + self.gamma * self.gae_lambda * nonterminal * running
            advantages[t] = running
        return advantages

    def learn(self):
        """Run n_epochs of clipped-PPO updates on the rollout, then clear it."""
        if not self.memory.rewards:  # nothing collected yet
            return

        state_arr = np.array(self.memory.states, dtype=np.float32)
        old_log_probs = torch.cat(self.memory.probs)   # (T,), graph-free
        action_arr = torch.cat(self.memory.actions)    # (T, n_actions)
        values = torch.cat(self.memory.vals).numpy()   # (T,)
        rewards = np.array(self.memory.rewards, dtype=np.float32)
        dones = np.array(self.memory.dones, dtype=np.float32)

        advantages = torch.tensor(self._gae(rewards, values, dones))
        returns = advantages + torch.tensor(values)    # GAE + V = return target

        for _ in range(self.n_epochs):
            for start in range(0, len(state_arr), self.batch_size):
                end = start + self.batch_size
                states = torch.tensor(state_arr[start:end])
                batch_old = old_log_probs[start:end]
                batch_actions = action_arr[start:end]
                batch_adv = advantages[start:end]
                batch_ret = returns[start:end]

                alpha, beta = self._beta_params(self.actor(states))
                dist = torch.distributions.Beta(alpha, beta)
                new_log_probs = dist.log_prob(batch_actions).sum(dim=-1)
                state_values = self.critic(states).squeeze(-1)

                # PPO clipped surrogate objective.
                prob_ratio = (new_log_probs - batch_old).exp()
                weighted = prob_ratio * batch_adv
                clipped = torch.clamp(
                    prob_ratio, 1 - self.clip_ratio, 1 + self.clip_ratio
                ) * batch_adv
                actor_loss = -torch.min(weighted, clipped).mean()

                # Value regression toward the GAE-based return targets.
                critic_loss = (batch_ret - state_values).pow(2).mean()

                # Entropy bonus (subtracted) to encourage exploration.
                entropy = dist.entropy().mean()

                total_loss = actor_loss + 0.5 * critic_loss - 0.01 * entropy

                self.optimizer.zero_grad()
                total_loss.backward()
                self.optimizer.step()

        self.memory.clear()

def train_rl_optimizer(n_episodes: int = 1000):
    """Train a PPO agent to tune gplearn's genetic-operator probabilities.

    Args:
        n_episodes: number of PPO episodes to run.

    Returns:
        (best_params, best_mse, best_program): the best operator-probability
        vector found, the lowest single-fit MSE observed, and the gplearn
        program from that fit (or None if no fit succeeded).
    """
    env = ParamOptimizationEnv()
    agent = PPOAgent(
        input_dims=env.observation_space.shape[0],
        n_actions=env.action_space.shape[0],
        lr=0.0001,      # lower learning rate for stability
        batch_size=32,  # smaller batches
        n_epochs=5,     # fewer PPO epochs per rollout
    )

    best_mse = float('inf')
    best_params = None
    best_program = None

    for episode in range(n_episodes):
        observation = env.reset()
        done = False
        score = 0

        while not done:
            action = agent.choose_action(observation)
            observation_, reward, done, info = env.step(action)
            score += reward

            agent.memory.rewards.append(reward)
            agent.memory.dones.append(done)

            observation = observation_

            # Track the best result per *step*: env.current_mse is the MSE of
            # the latest fit. (The original compared -score, the cumulative
            # episode reward, which only equals an MSE for 1-step episodes.)
            if env.current_mse < best_mse:
                best_mse = env.current_mse
                # Copy so later env updates cannot alias the saved best.
                best_params = env.current_params.copy()
                # _program is a gplearn private attribute; guard in case no
                # successful fit has produced one yet.
                best_program = getattr(env.evaluator, "_program", None)

        agent.learn()

        if (episode + 1) % 10 == 0:
            print(f"\nEpisode {episode+1}/{n_episodes}")
            print(f"Score: {score:.4f}")
            print(f"Best MSE: {best_mse:.4f}")
            print(f"Best Parameters: {best_params}")
            if best_program is not None:
                print(f"Best Program: {best_program}")
            print("------------------------")

    return best_params, best_mse, best_program

if __name__ == "__main__":
    best_params, best_mse, best_program = train_rl_optimizer()
    print("\n最终结果:")
    print(f"最佳MSE: {best_mse:.4f}")
    print("最佳参数:")
    param_names = ["p_crossover", "p_subtree_mutation", "p_hoist_mutation", "p_point_mutation"]
    for name, value in zip(param_names, best_params):
        print(f"{name}: {value:.4f}")
    print(f"\n最佳程序: {best_program}") 