from pysr import PySRRegressor
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

from sklearn.model_selection import train_test_split
from bishe_optim import WAIT_OPTIM, HyperparameterOptimizer, history_to_kwargs, init_history, postprocess_params, preprocess_params
from bishe_situations.utils import generate_data, plot_results, evaluate_model
from collections import deque

# Unary operator names made available to the symbolic-regression search.
UNARY_OPS = ["neg", "exp", "sin", "square", "log", "cos", 
           "cube", "log10", "tan", "log2", "sqrt", "log1p", "abs"]


def optimize_hyperparameters(X, y, n_trials=10):
    """Search PySR hyperparameters with a learned optimizer network.

    Splits (X, y) into train/validation sets, then for each trial asks a
    HyperparameterOptimizer network to propose candidate hyperparameter
    vectors, fits one PySRRegressor per candidate, and scores each by
    validation MSE. The two best-scoring accepted candidates are kept as a
    rolling history that conditions the next proposal.

    Args:
        X: feature array accepted by sklearn's train_test_split.
        y: target array aligned with X.
        n_trials: number of proposal/evaluation rounds to run.

    Returns:
        Tuple ``(best_equation, best_params, best_mse)``: the sympy
        expression of the best model found, the PySR keyword arguments that
        produced it, and its validation MSE. ``best_equation``/``best_params``
        are ``None`` if no candidate finished with a finite MSE improvement.
    """
    # Hold out 20% of the data for validation.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

    # Network that proposes hyperparameter vectors, and its own optimizer.
    model = HyperparameterOptimizer()
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    criterion = nn.MSELoss()

    best_mse = float('inf')
    best_params = None
    best_equation = None
    # Rolling history (size 2) of accepted parameter vectors and their MSEs;
    # fed back into the optimizer network on the next trial.
    global_history = deque(init_history(2), maxlen=2)
    global_history_mses = deque([float('inf')] * 2, maxlen=2)

    for trial in range(n_trials):
        history = [preprocess_params(item) for item in global_history]
        history_tensor = torch.FloatTensor(np.array(history))  # (batch_size, n_params)
        # Predict a batch of candidate hyperparameter vectors.
        params: torch.Tensor = model(history_tensor)
        params_detached = params.detach().numpy()
        # Accumulated validation loss over all candidates in this trial.
        total_loss = torch.zeros(1, requires_grad=True)

        for param in params_detached:
            processed_param = postprocess_params(param)
            kwargs = history_to_kwargs(processed_param)

            # Fit a PySR model with the proposed hyperparameters.
            pysr_model = PySRRegressor(
                binary_operators=["+", "-", "*", "/", "^"],
                unary_operators=UNARY_OPS,
                niterations=10,
                population_size=20,
                maxsize=20,
                parsimony=0.1,
                verbosity=0,
                constraints={'^': (-5, 5)},
                **kwargs
            )
            pysr_model.fit(X_train, y_train)

            # Score on the held-out validation set.
            y_pred = pysr_model.predict(X_val)
            y_pred_tensor = torch.from_numpy(np.array(y_pred.flatten())).float()
            y_val_tensor = torch.from_numpy(np.array(y_val.flatten())).float()

            mse: torch.Tensor = criterion(y_pred_tensor, y_val_tensor)
            # NOTE(review): `mse` is built from numpy predictions, so it is
            # detached from `model`'s computation graph; `total_loss.backward()`
            # below therefore produces no gradients for `model`'s parameters
            # and `optimizer.step()` is effectively a no-op — confirm intent.
            total_loss = total_loss + mse
            mse_value = mse.item()

            # Track the overall best result. (Previously this was overwritten
            # by any candidate that merely beat the *oldest* history entry,
            # even when it was worse than the current best.)
            if mse_value < best_mse:
                best_mse = mse_value
                best_params = kwargs.copy()
                best_equation = pysr_model.sympy()

            # Admit the candidate into the rolling history when it beats the
            # oldest retained MSE.
            if mse_value < global_history_mses[0]:
                global_history.append(processed_param)
                global_history_mses.append(mse_value)
                print(f"Trial {trial+1}/{n_trials}, MSE: {mse_value:.4f}", pysr_model.sympy(), "#better#")
            else:
                print(f"Trial {trial+1}/{n_trials}, MSE: {mse_value:.4f}", pysr_model.sympy())

        # Average the loss across this trial's candidates.
        total_loss = total_loss / len(params)
        print(f"Trial {trial+1}/{n_trials}, Total Loss: {total_loss.item():.4f}")

        # Back-propagate and update the proposal network.
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

    return best_equation, best_params, best_mse

from gplearn.genetic import SymbolicRegressor

def optimize_hyperparameters(X, y, n_trials=10):
    """Search gplearn hyperparameters with a learned optimizer network.

    gplearn variant of the search loop: for each trial the
    HyperparameterOptimizer network proposes candidate hyperparameter
    vectors, one SymbolicRegressor is fitted per candidate, and each is
    scored by validation MSE.

    NOTE(review): this definition shadows the PySR-based function of the
    same name defined earlier in the file; only this one is callable.

    Args:
        X: feature array accepted by sklearn's train_test_split.
        y: target array aligned with X.
        n_trials: number of proposal/evaluation rounds to run.

    Returns:
        Tuple ``(best_equation, best_params, best_mse)``: the string form of
        the best fitted program, the keyword arguments that produced it, and
        its validation MSE. The first two are ``None`` if nothing improved.
    """
    # Hold out 20% of the data for validation.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

    # Network that proposes hyperparameter vectors, and its own optimizer.
    model = HyperparameterOptimizer()
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    criterion = nn.MSELoss()

    best_mse = float('inf')
    best_params = None
    best_equation = None
    # Rolling history (size 2) of accepted parameter vectors and their MSEs.
    global_history = deque(init_history(2), maxlen=2)
    global_history_mses = deque([float('inf')] * 2, maxlen=2)

    # gplearn only accepts its own built-in function names; the broader
    # UNARY_OPS list (exp, square, cube, log10, ...) makes fit() raise a
    # ValueError. Use the supported subset plus the arithmetic binaries.
    gplearn_function_set = ("add", "sub", "mul", "div",
                            "sqrt", "log", "abs", "neg", "sin", "cos", "tan")

    for trial in range(n_trials):
        history = [preprocess_params(item) for item in global_history]
        history_tensor = torch.FloatTensor(np.array(history))  # (batch_size, n_params)
        # Predict a batch of candidate hyperparameter vectors.
        params: torch.Tensor = model(history_tensor)
        params_detached = params.detach().numpy()
        # Accumulated validation loss over all candidates in this trial.
        total_loss = torch.zeros(1, requires_grad=True)

        for param in params_detached:
            processed_param = postprocess_params(param)
            kwargs = history_to_kwargs(processed_param)

            # Fit a gplearn model with the proposed hyperparameters.
            sr_model = SymbolicRegressor(
                function_set=gplearn_function_set,
                parsimony_coefficient=0.01,
                max_depth=5,
                verbose=0,
                random_state=42,
                n_jobs=-1,
                **kwargs
            )
            sr_model.fit(X_train, y_train)
            # gplearn has no .sympy(); the fitted program is `_program`.
            equation = str(sr_model._program)

            # Score on the held-out validation set.
            y_pred = sr_model.predict(X_val)
            y_pred_tensor = torch.from_numpy(np.array(y_pred.flatten())).float()
            y_val_tensor = torch.from_numpy(np.array(y_val.flatten())).float()

            mse: torch.Tensor = criterion(y_pred_tensor, y_val_tensor)
            # NOTE(review): `mse` is detached from `model`'s graph (numpy
            # round-trip), so backward() yields no gradient for `model` and
            # optimizer.step() is effectively a no-op — confirm intent.
            total_loss = total_loss + mse
            mse_value = mse.item()

            # Track the overall best result. (Previously this was overwritten
            # by any candidate that merely beat the *oldest* history entry,
            # even when it was worse than the current best.)
            if mse_value < best_mse:
                best_mse = mse_value
                best_params = kwargs.copy()
                best_equation = equation

            # Admit the candidate into the rolling history when it beats the
            # oldest retained MSE.
            if mse_value < global_history_mses[0]:
                global_history.append(processed_param)
                global_history_mses.append(mse_value)
                print(f"Trial {trial+1}/{n_trials}, MSE: {mse_value:.4f}", equation, "#better#")
            else:
                print(f"Trial {trial+1}/{n_trials}, MSE: {mse_value:.4f}", equation)

        # Average the loss across this trial's candidates.
        total_loss = total_loss / len(params)
        print(f"Trial {trial+1}/{n_trials}, Total Loss: {total_loss.item():.4f}")

        # Back-propagate and update the proposal network.
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

    return best_equation, best_params, best_mse

def main():
    """Generate synthetic data and run the hyperparameter search end-to-end."""
    def target_function(X):
        # Ground-truth function the symbolic regressor should recover.
        return np.sin(X) * 2 + np.cos(X) - 80 + np.exp(X)

    # Generate noisy samples of the target function.
    X, y = generate_data(target_function, n_samples=512, noise=0.8)

    # Run the hyperparameter search.
    print("开始优化超参数...")
    best_equation, best_params, best_mse = optimize_hyperparameters(X, y)

    print("\n最佳超参数:")
    # best_params is None when no trial produced an improvement; guard
    # against AttributeError on .items().
    if best_params is not None:
        for key, value in best_params.items():
            print(f"{key}: {value}")
    print(f"最佳MSE: {best_mse:.4f}")

# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main() 