from abc import ABCMeta, abstractmethod
import random
import time
import numpy as np
import torch

# Operator vocabularies for the PySR backend: unary ops by name, binary ops as symbols.
UNARY_OPS_PYSR = ["neg", "exp", "sqrt", "log","sin", "cos", "tan", "square", "cube", "abs"]
BINARY_OPS_PYSR = ["+", "-", "*", "/", "^"]
# Operator vocabularies for the gplearn backend (gplearn uses word names like 'add').
# NOTE(review): binary set omits 'div' even though unary has 'inv' — confirm intended.
UNARY_OPS_GPLEARN = ('neg', 'sqrt','log','inv','sin','cos','tan','abs')
BINARY_OPS_GPLEARN = ('add','sub','mul')
# Combined gplearn function set (unary first, then binary).
OPS_GPLEARN = UNARY_OPS_GPLEARN + BINARY_OPS_GPLEARN

# Baseline gplearn genetic-operator probabilities.
GPLEARN_PARAMS = {
    'p_crossover': 0.65,
    'p_subtree_mutation': 0.01,
    'p_hoist_mutation': 0.01,
    'p_point_mutation': 0.01,
}

# Baseline PySR mutation weights / probabilities. Insertion order matters:
# preprocess_params/postprocess_params/history_to_kwargs below treat the
# values (and keys) of this dict as a positionally-ordered vector of
# per-parameter defaults.
PYSR_PARAMS = { 
    "fraction_replaced": 0.00036,
    "fraction_replaced_hof": 0.0614,
    "weight_add_node": 2.47,
    "weight_insert_node": 0.0112,
    "weight_delete_node": 0.87,
    "weight_do_nothing": 0.273,
    "weight_mutate_constant": 0.0346,
    "weight_mutate_operator": 0.293,
    "weight_swap_operands": 0.198,
    "weight_rotate_tree": 4.26,
    "weight_randomize": 0.000502,
    "weight_simplify": 0.00209,
    "weight_optimize": 1e-7,
    "crossover_probability": 0.0259,
    "optimize_probability": 0.14,
    "perturbation_factor": 0.129,
    "probability_negate_constant": 0.00743,
}

def preprocess_params(params, dict_items: dict = PYSR_PARAMS):
    """Normalize raw hyperparameter values in place.

    Each entry is first divided by the corresponding default taken from
    ``dict_items`` (positional order), then squashed with the tanh-like
    curve y = 2*x / (1 + |x|), whose range is (-2, 2). A value equal to
    its default maps to exactly 1.

    Args:
        params: mutable sequence of raw values, ordered like dict_items.
        dict_items: mapping whose values are the per-parameter defaults.

    Returns:
        The same ``params`` object, mutated in place.
    """
    defaults = list(dict_items.values())
    for idx in range(len(params)):
        # Scale relative to the default, then squash.
        params[idx] = params[idx] / defaults[idx]
        params[idx] = 2 * (params[idx] / (1 + abs(params[idx])))
    return params

def postprocess_params(params: np.ndarray, dict_items: dict = PYSR_PARAMS):
    """Map squashed values back toward the original parameter scale, in place.

    Applies (y + 1) * default / 2 per element, so y == 1 recovers the
    default value and y == -1 maps to 0.

    NOTE(review): this affine map is not the exact inverse of the
    squash in ``preprocess_params`` (that would be x = y / (2 - |y|)
    times the default) — confirm the approximation is intended.
    """
    defaults = list(dict_items.values())
    for idx, squashed in enumerate(params):
        params[idx] = (squashed + 1) * defaults[idx] / 2
    return params
    

def history_to_kwargs(history, dict_items: dict = PYSR_PARAMS):
    """Pair an ordered value sequence with the keys of ``dict_items``.

    ``history`` must have exactly one value per key; values are matched
    to keys positionally (dict insertion order).

    Returns:
        dict mapping each key of dict_items to the corresponding value.
    """
    assert len(history) == len(dict_items)
    return dict(zip(dict_items.keys(), history))

def mse_tensor(y_pred, y_true):
    """Return the mean squared error between two tensors as a scalar tensor."""
    residual = y_pred - y_true
    return (residual ** 2).mean()

def generate_data(func, n_samples=1000, noise=0.1, random_state=42):
    """Build a toy symbolic-regression dataset.

    Seeds NumPy's legacy global RNG, so results are reproducible for a
    given ``random_state`` (but the global random state is mutated as a
    side effect).

    Args:
        func: vectorized target function applied to the whole X array.
        n_samples: number of samples to draw.
        noise: standard deviation of additive Gaussian noise.
        random_state: seed passed to np.random.seed.

    Returns:
        X: inputs of shape (n_samples, 1), uniform in [-10, 10].
        y: func(X) with noise added in place.
    """
    np.random.seed(random_state)
    X = np.random.uniform(-10, 10, size=(n_samples, 1))
    y = func(X)
    # Additive Gaussian noise, matching whatever shape func produced.
    y += np.random.normal(0, noise, y.shape)
    return X, y

class BaselineTrainer(metaclass=ABCMeta):
    """Abstract base for symbolic-regression baseline runners.

    Subclasses implement ``override_fit`` (the actual search) and
    ``best_function``; ``fit`` wraps the run with wall-clock timing.
    """

    def __init__(self, params: dict, population_size: int, generations: int, **kwargs):
        # Search configuration.
        self.params = params
        self.population_size = population_size
        self.generations = generations
        self.kwargs = kwargs
        # Timing bookkeeping, filled in by pre_fit/post_fit.
        self.train_time_start = None
        self.train_time_end = None
        self.train_time = None
        # Fit quality; subclasses are expected to populate this.
        self.mse = None

    def pre_fit(self):
        """Record the wall-clock start of training."""
        self.train_time_start = time.time()

    def post_fit(self):
        """Record the end time and the elapsed training duration."""
        self.train_time_end = time.time()
        self.train_time = self.train_time_end - self.train_time_start

    @abstractmethod
    def override_fit(self, X, y):
        """Run the actual search. Provided by subclasses."""

    def fit(self, X, y):
        """Run ``override_fit`` bracketed by timing bookkeeping."""
        self.pre_fit()
        self.override_fit(X, y)
        self.post_fit()

    @abstractmethod
    def best_function(self):
        """Return the best discovered expression. Provided by subclasses."""

class BaseNeuralTrainer(BaselineTrainer):
    """Base class for neural-guided trainers.

    Adds an iteration count and best-equation/MSE tracking on top of
    BaselineTrainer. The abstract hooks are stubbed out so concrete
    subclasses can override only what they need.
    """

    def __init__(self, params: dict, population_size: int, generations: int, niterations: int, **kwargs):
        # Delegate the shared configuration and timing setup to the base
        # class instead of duplicating the assignments. (The original
        # skipped super().__init__(), leaving train_time_start/end and
        # train_time undefined until fit() was called.)
        super().__init__(params, population_size, generations, **kwargs)
        # Number of search iterations for the neural-guided loop.
        self.niterations = niterations
        # Best symbolic equation found so far, if any.
        self.best_equation = None
        # Start at +inf (overriding the base-class None) so any real
        # result compares as an improvement.
        self.mse = float('inf')

    def override_fit(self, X, y):
        # Concrete neural trainers supply the actual optimization loop.
        pass

    def best_function(self):
        # Concrete neural trainers return their best expression here.
        pass

import sympy
from torch import nn

class HyperparameterOptimizer(nn.Module):
    """Decode a single learned embedding into a vector of hyperparameters.

    Holds a one-row embedding table (the only valid input index is 0)
    and maps it through a small MLP to ``n_params`` raw outputs.
    """

    def __init__(self, n_params=len(PYSR_PARAMS)):
        super().__init__()
        # One learnable 128-d vector, looked up by index.
        self.embedded = nn.Embedding(1, 128)
        # MLP decoder: 128 -> 256 -> 128 -> n_params.
        self.fc = nn.Sequential(
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, n_params),
        )

    def forward(self, x):
        # x: LongTensor of embedding indices -> (..., 128) -> (..., n_params).
        hidden = self.embedded(x)
        return self.fc(hidden)
    
    
