import random
import torch
import torch.nn as nn

# Hyperparameters to be optimized, mapped to their default values.
# The key names match symbolic-regression (PySR-style) mutation/crossover
# settings — presumably forwarded to the regressor via history_to_kwargs;
# confirm exact semantics against the downstream library's docs.
WAIT_OPTIM_DICT = { 
    "fraction_replaced": 0.00036,
    "fraction_replaced_hof": 0.0614,
    "weight_add_node": 2.47,
    "weight_insert_node": 0.0112,
    "weight_delete_node": 0.87,
    "weight_do_nothing": 0.273,
    "weight_mutate_constant": 0.0346,
    "weight_mutate_operator": 0.293,
    "weight_swap_operands": 0.198,
    "weight_rotate_tree": 4.26,
    "weight_randomize": 0.000502,
    "weight_simplify": 0.00209,
    "weight_optimize": 1e-7,
    "crossover_probability": 0.0259,
    "optimize_probability": 0.14,
    "perturbation_factor": 0.129,
    "probability_negate_constant": 0.00743,
}

# Parameter names in a fixed order; the lists below and several functions in
# this module rely on index alignment between WAIT_OPTIM and
# WAIT_OPTIM_DEFAULT, so do not reorder.
WAIT_OPTIM = list(WAIT_OPTIM_DICT.keys())
WAIT_OPTIM_DEFAULT = list(WAIT_OPTIM_DICT.values()) # default values, index-aligned with WAIT_OPTIM

def preprocess_params(params):
    """Squash raw hyperparameter values into a bounded range, in place.

    Each entry is first divided by its default value (index-aligned with
    ``WAIT_OPTIM_DEFAULT``), then passed through the soft-sign-like map
    ``2*x / (1 + |x|)``.

    NOTE(review): despite the original comment, ``2*x/(1+|x|)`` has range
    (-2, 2), not [-1, 1], and this is the inverse of ``postprocess_params``
    only at the default values themselves — confirm the intended
    round-trip behavior.

    Mutates and returns the input list.
    """
    for idx, raw in enumerate(params):
        scaled = raw / WAIT_OPTIM_DEFAULT[idx]
        params[idx] = 2 * (scaled / (1 + abs(scaled)))
    return params

def postprocess_params(params):
    """Map values from [-1, 1] back onto the original parameter scale, in place.

    Each entry ``y`` becomes ``(y + 1) * default / 2`` — a linear map from
    [-1, 1] onto [0, default], index-aligned with ``WAIT_OPTIM_DEFAULT``.

    NOTE(review): this is not the mathematical inverse of
    ``preprocess_params`` except exactly at the default values; for other
    inputs the round trip does not recover the original — confirm which
    mapping is intended.

    Mutates and returns the input list.
    """
    for idx, normalized in enumerate(params):
        params[idx] = (normalized + 1) * WAIT_OPTIM_DEFAULT[idx] / 2
    return params
    

def init_history(batch_size=8):
    """Create ``batch_size - 1`` randomly perturbed parameter sets.

    Each set scales every default value by an independent uniform random
    factor drawn from [-1, 1). Only ``batch_size - 1`` sets are produced —
    presumably the current/default parameter set fills the remaining slot;
    verify against callers.

    Returns:
        list of lists, each index-aligned with WAIT_OPTIM_DEFAULT.
    """
    samples = []
    for _ in range(batch_size - 1):
        samples.append(
            [default * (random.random() * 2 - 1) for default in WAIT_OPTIM_DEFAULT]
        )
    return samples

def history_to_kwargs(history):
    """Convert an ordered sequence of parameter values into a kwargs dict.

    Args:
        history: sequence of values, index-aligned with ``WAIT_OPTIM``
            (one value per optimizable hyperparameter).

    Returns:
        dict mapping each ``WAIT_OPTIM`` name to its value — presumably
        passed as keyword arguments to the symbolic-regression
        constructor; confirm with callers.

    Raises:
        ValueError: if ``history``'s length does not match ``WAIT_OPTIM``.
    """
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, silently disabling the validation.
    if len(history) != len(WAIT_OPTIM):
        raise ValueError(
            f"expected {len(WAIT_OPTIM)} parameter values, got {len(history)}"
        )
    return dict(zip(WAIT_OPTIM, history))

class HyperparameterOptimizer(nn.Module):
    """MLP that maps a vector of hyperparameter values to an adjusted
    vector of the same length.

    Architecture: n_params -> 256 -> 128 -> n_params, with ReLU
    activations and dropout (p=0.2) after each hidden layer.
    """

    def __init__(self, n_params=len(WAIT_OPTIM)):
        super().__init__()
        self.n_params = n_params

        # Build the fully connected stack as a list first, then wrap it
        # in a single Sequential module.
        layers = [
            nn.Linear(n_params, 256),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(128, n_params),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass: x of shape (batch_size, n_params) -> same shape."""
        return self.fc(x)
    
if __name__ == "__main__":
    # Smoke test: print a randomly initialized parameter history.
    separator = "---------------------------------------"
    print(separator)

    print(separator)

    history = init_history(8)
    print(history)