from abc import ABCMeta
from pprint import pprint
from pysr import PySRRegressor
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

from sklearn.model_selection import train_test_split
from bishe_situations.utils import BaseNeuralTrainer, HyperparameterOptimizer, history_to_kwargs, mse_tensor, postprocess_params, preprocess_params
from bishe_situations.utils import BINARY_OPS_PYSR, PYSR_PARAMS, UNARY_OPS_PYSR, generate_data
from collections import deque
    
class NeuralPysrTrainer(BaseNeuralTrainer):
    """Hyperparameter search for PySR driven by a small neural network.

    Each outer trial asks a ``HyperparameterOptimizer`` network for a set of
    PySR keyword arguments, fits a fresh ``PySRRegressor`` with them on a
    train split, scores the result on a validation split, and keeps the best
    equation seen so far.
    """

    def __init__(self, params: dict = PYSR_PARAMS, population_size: int = 50, generations: int = 10, niterations: int = 10, **kwargs):
        # Note: `niterations` here is the number of OUTER search trials,
        # while `generations` is forwarded to PySR as its own `niterations`
        # (see override_fit) — the naming is easy to confuse.
        super().__init__(params, population_size, generations, niterations,**kwargs)
        # NOTE(review): defaults to 'cuda' without checking availability —
        # on a CPU-only machine `.to(self.device)` below will raise; confirm
        # callers always pass device='cpu' when no GPU is present.
        self.device = kwargs.get('device', 'cuda')
        
    def override_fit(self, X, y):
        """Run `self.niterations` search trials over PySR hyperparameters.

        Tracks the best validation MSE in `self.mse` and the corresponding
        sympy equation in `self.best_equation`.
        # NOTE(review): `self.mse` is read before first assignment here, so it
        # is presumably initialized in BaseNeuralTrainer — verify.
        """
         # Split into train and validation sets
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
        # Initialize the hyperparameter-proposal model and its optimizer
        model = HyperparameterOptimizer().to(self.device)
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        criterion = nn.MSELoss()
        
        for trial in range(self.niterations):
            # Ask the network for a raw hyperparameter vector (constant input)
            params: torch.Tensor = model(torch.tensor([0]).to(self.device))
            params_detached = params.clone().detach().cpu().numpy()

            # Decode the raw vector into concrete PySR keyword arguments
            processed_param = postprocess_params(params_detached, PYSR_PARAMS)
            kwargs = history_to_kwargs(processed_param[0], PYSR_PARAMS)
            pprint(kwargs)
            
            # Fit a fresh PySR model with the proposed hyperparameters
            pysr_model = PySRRegressor(
                binary_operators=BINARY_OPS_PYSR,
                unary_operators=UNARY_OPS_PYSR,
                niterations=self.generations,
                population_size=self.population_size,
                maxsize=20,
                parsimony=0.1,
                progress=False,
                verbosity=1,
                constraints={'^': (-5, 5)},
                **kwargs
            )
            pysr_model.fit(X_train, y_train)
            
            # Evaluate on the validation split
            y_pred = pysr_model.predict(X_val)
            
            # NOTE(review): y_pred comes from PySR as numpy, so this tensor is
            # a brand-new autograd leaf — the graph does NOT connect back to
            # `model`'s parameters (the forward pass above was detached).
            y_pred_tensor = torch.from_numpy(np.array(y_pred.flatten())).float().requires_grad_(True)
            y_val_tensor = torch.from_numpy(np.array(y_val.flatten())).float()
            
            # Validation loss. Arguments are (target, prediction) — swapped
            # relative to nn.MSELoss's (input, target) convention, but MSE is
            # symmetric so the value is unaffected.
            loss = criterion(y_val_tensor, y_pred_tensor)
            
            if loss.item() < self.mse:
                self.mse = loss.item()
                self.best_equation = pysr_model.sympy()
                print(f"$$$$Trial {trial+1}/{self.niterations}, MSE: {loss.item():.4f}", pysr_model.sympy(), "#better#")
            else:
                print(f"$$$$Trial {trial+1}/{self.niterations}, MSE: {loss.item():.4f}", pysr_model.sympy())

            # NOTE(review): because the loss is detached from `model` (see
            # above), backward() leaves model.parameters() without gradients
            # and this step is effectively a no-op — the network never learns
            # from trial outcomes. Confirm whether this is intentional (e.g.
            # random search in disguise) or a bug to be redesigned.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    
    def best_function(self):
        # Returns the sympy expression of the best equation found by
        # override_fit (attribute is only set once a trial improves self.mse).
        return self.best_equation
        

def optimize_hyperparameters(X, y, n_trials=10):
    """Search PySR hyperparameters via the neural trainer.

    Args:
        X, y: dataset forwarded to the trainer's ``fit``.
        n_trials: number of outer search trials (forwarded as the trainer's
            ``niterations``).

    Returns:
        Tuple ``(best_equation, best_mse, train_time)`` read off the trainer.
    """
    # Bug fix: the original call passed a stray 5th positional argument (`4`)
    # that no parameter accepts (**kwargs only catches keywords), raising
    # TypeError at runtime — and it ignored `n_trials` entirely, always
    # running the default 10 trials. Forward n_trials as niterations instead.
    model = NeuralPysrTrainer(PYSR_PARAMS, 50, 10, n_trials)
    model.fit(X, y)

    return model.best_function(), model.mse, model.train_time

def main():
    """Demo entry point: fit symbolic regression to a noisy analytic target."""

    # Ground-truth function the symbolic regressor should rediscover.
    def target_function(X):
        return 2 * np.sin(X) + np.cos(X) - 80 + np.exp(X)

    # Draw a noisy sample from the target.
    X, y = generate_data(target_function, n_samples=512, noise=0.8)

    # Run the hyperparameter search and report the outcome.
    print("开始优化超参数...")
    equation, mse, elapsed = optimize_hyperparameters(X, y)

    print("\n最佳超参数:")
    print(f"最佳MSE: {mse:.4f}")
    print(f"最佳方程: {equation}")
    print(f"训练时间: {elapsed:.4f}秒")

# Script entry point: run the demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main() 