import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
import optuna
# Assumes a CSV file with feature columns x1, x2, x3 and a target column y.
# NOTE(review): 'gbk' encoding suggests a Chinese-locale export — confirm the file encoding.
data = pd.read_csv('data7.csv', encoding='gbk')

# Extract features and the raw target series.
X = data[['x1', 'x2', 'x3']]
y1 = data['y']

# The target is built by averaging rows 1..48 of y (positional) in
# consecutive groups of 3, yielding 16 averaged values — one per feature row.
N_GROUPS = 16   # number of averaged target values
GROUP_SIZE = 3  # raw rows averaged per target value
raw = y1.iloc[1:1 + N_GROUPS * GROUP_SIZE].to_numpy(dtype=float)
# Vectorized group mean replaces the original per-element loop, which
# assigned floats into an int64 Series (dtype-upcast pitfall) and relied on
# the slice keeping its original index labels for lookup.
y = pd.Series(raw.reshape(N_GROUPS, GROUP_SIZE).mean(axis=1))

# Keep only the feature rows matching the 16 averaged targets.
X = X.iloc[:N_GROUPS]

# Split into training and test sets (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


# Fit a ridge regression (L2-regularized linear model) with alpha=1,
# then score it on the held-out test split.
model = Ridge(alpha=1).fit(X_train, y_train)

# Predict on the test set and report mean squared error.
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print(f"Mean Squared Error: {mse:.4f}")
def objective(trial):
    """Optuna objective: the fitted model's linear prediction at a candidate input.

    Suggests one value per feature from a fixed categorical grid and returns
    the ridge model's prediction w·x + b, which the study maximizes.

    Args:
        trial: optuna.trial.Trial used to sample the candidate input.

    Returns:
        The predicted target value for the sampled (x1, x2, x3).
    """
    x1 = trial.suggest_categorical('x1', [15, 20, 25, 30])
    x2 = trial.suggest_categorical('x2', [100, 110, 120, 130])
    x3 = trial.suggest_categorical('x3', [0, 10, 20, 30])

    # Linear prediction from the globally fitted ridge model.
    # (Renamed from `max`, which shadowed the builtin.)
    predicted = np.dot(model.coef_, [x1, x2, x3]) + model.intercept_
    return predicted
# Create an Optuna study that maximizes the objective's predicted value.
study = optuna.create_study(direction='maximize')

# Run the search. The grid has only 4*4*4 = 64 distinct combinations, so
# 100 trials guarantees full coverage is possible (trials may repeat).
study.optimize(objective, n_trials=100)

# Report the fitted model parameters.
print("Coefficients:", model.coef_)
print("Intercept:", model.intercept_)

# Report the search result — previously the study outcome was never printed.
print("Best params:", study.best_params)
print("Best predicted y:", study.best_value)

# Test-set mean squared error (removed a redundant re-prediction of y_pred,
# which was identical to the earlier model.predict(X_test)).
mse = mean_squared_error(y_test, y_pred)
print(f"均方误差: {mse}")