import numpy as np
import pandas as pd
from scipy import stats
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import matplotlib
from sklearn.gaussian_process.kernels import ConstantKernel, RBF

# Use Microsoft YaHei so the Chinese axis labels and titles render correctly.
font = {'family': 'Microsoft YaHei'}
matplotlib.rcParams['font.family'] = font['family']

# 1. Load the raw data; the first two rows are header/metadata, so skip them.
data = pd.read_csv('GPRdata1.csv', skiprows=2)

# 2. Preprocessing: the first six columns are features, the seventh is the target.
X = data.iloc[:, :6]
y = data.iloc[:, 6]

# Impute missing values with the per-column mean.
# FIX: reassign instead of fillna(..., inplace=True). X and y are slices of
# `data`, and mutating a slice in place triggers pandas' chained-assignment
# warning and may silently not update (it raises under copy-on-write in
# pandas 2.x). Reassignment is always safe.
X = X.fillna(X.mean())
y = y.fillna(y.mean())

# Outlier removal: keep only rows where every feature has |z-score| < 3.
# NOTE(review): a constant feature column would yield NaN z-scores and drop
# every row — assumed not to occur in this dataset; verify if inputs change.
z_scores = stats.zscore(X)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
X = X[filtered_entries]
y = y[filtered_entries]

# Standardize features and target to zero mean / unit variance.
# Both fitted scalers are kept at module level: scaler_y is needed later to
# map predictions back to the original target units.
scaler_X = StandardScaler()
scaler_y = StandardScaler()

X_scaled = pd.DataFrame(scaler_X.fit_transform(X), columns=X.columns)
y_scaled = scaler_y.fit_transform(np.reshape(y.values, (-1, 1)))

# The entire cleaned dataset is used as the training set (in-sample fit).
X_train = X_scaled
y_train = y_scaled

# 3. Model training: Gaussian-process regression with an amplitude-scaled
# RBF kernel. Wide bounds let the optimizer tune both hyperparameters.
amplitude = ConstantKernel(0.1, (1e-6, 1e6))
rbf = RBF(0.1, (1e-4, 1e5))
kernel = amplitude * rbf

gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
gpr.fit(X_train, y_train.ravel())

# Predict on the training set itself (in-sample quality check) and get the
# predictive standard deviation alongside the mean.
y_pred, std_dev = gpr.predict(X_train, return_std=True)

# Undo the target standardization before scoring, so R2 is computed in the
# original units.
y_pred_inv = scaler_y.inverse_transform(y_pred.reshape(-1, 1))
y_train_inv = scaler_y.inverse_transform(y_train)
r2 = r2_score(y_train_inv, y_pred_inv)

print(f'R2系数: {r2}')

# Plot predicted vs. actual values, both in the original (unscaled) units.
# FIX: std_dev is returned by the model in *standardized* target units, but
# both axes show inverse-transformed values. The 95% interval must be
# rescaled by scaler_y.scale_ (the fitted std of y) to match the axes;
# plotting 1.96*std_dev directly drew intervals ~scale_ times too small
# (or too large) depending on the target's variance.
std_dev_orig = std_dev * scaler_y.scale_[0]

plt.figure(figsize=(10, 5))
plt.scatter(y_train_inv.ravel(), y_pred_inv.ravel(), c='crimson')
plt.errorbar(y_train_inv.ravel(), y_pred_inv.ravel(), yerr=1.96 * std_dev_orig,
             fmt='o', color='lightgray', alpha=0.5, label='95% 置信区间')
plt.xlabel('实际值')
plt.ylabel('预测值')
plt.title('预测值 vs 训练数据值')
plt.legend()
plt.show()
print("finished")