import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score

# Load the Boston housing dataset (506 samples, 13 features, target = MEDV).
# NOTE: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2.
# Fall back to the replacement recipe documented by scikit-learn (fetch the
# raw data from CMU) when the loader is no longer available.
try:
    from sklearn.datasets import load_boston
    dataSet = load_boston()
    x_full, y_full = dataSet.data, dataSet.target
except ImportError:
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
    # Each record spans two physical rows: 11 features on the first row,
    # then 2 more features plus the target (MEDV) on the second.
    x_full = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    y_full = raw_df.values[1::2, 2]

n_samples = x_full.shape[0]
n_features = x_full.shape[1]

# Inject artificial missing values into a copy of the feature matrix.
rng = np.random.RandomState(0)
# Fraction of all cells to blank out.
missing_rate = 0.5
# Total number of cells to set to NaN (duplicate coordinates may collide,
# so the effective count can be slightly lower).
n_missing = int(np.floor(n_samples * n_features * missing_rate))
# Scatter the NaNs uniformly at random over the matrix; column indices are
# drawn before row indices (the draw order fixes the RNG stream).
missing_features = rng.randint(0, n_features, n_missing)
missing_samples = rng.randint(0, n_samples, n_missing)

x_missing = x_full.copy()
y_missing = y_full.copy()
x_missing[missing_samples, missing_features] = np.nan

# Work with a DataFrame from here on (pandas NaN handling, .isnull(), etc.).
x_missing = pd.DataFrame(x_missing)

# Two baseline imputation strategies for comparison.
from sklearn.impute import SimpleImputer

# Baseline 1: replace each NaN with its column's mean.
imp_mean = SimpleImputer(missing_values=np.nan, strategy="mean")
X_missing_mean = imp_mean.fit_transform(x_missing)

# Baseline 2: replace each NaN with the constant 0.
imp_0 = SimpleImputer(
    missing_values=np.nan, strategy="constant", fill_value=0
)
X_missing_0 = imp_0.fit_transform(x_missing)

# Iterative imputation with a random forest: treat each feature that has
# missing values as a regression target and predict its blanks from the
# remaining features plus the true target y_full.
x_missing_reg = x_missing.copy()
# Process columns from fewest to most missing entries, so later (harder)
# columns benefit from the fills already made in easier ones.
fill_order = np.argsort(x_missing_reg.isnull().sum(axis=0)).values

for col in fill_order:
    # The column being imputed becomes the regression target.
    target = x_missing_reg.iloc[:, col]
    # Predictors = every other column (in its current fill state) + y_full.
    features = pd.concat(
        [x_missing_reg.iloc[:, x_missing_reg.columns != col],
         pd.DataFrame(y_full)],
        axis=1,
    )
    # Temporarily zero-fill the remaining NaNs so the forest can be fitted.
    features_0 = SimpleImputer(
        missing_values=np.nan, strategy="constant", fill_value=0
    ).fit_transform(features)

    # Rows with a known target value form the training set; the rows where
    # the target is NaN are what we predict.
    known = target.notnull()
    Xtrain = features_0[known.values, :]
    Ytrain = target[known]
    Xtest = features_0[~known.values, :]

    forest = RandomForestRegressor(n_estimators=100)
    forest.fit(Xtrain, Ytrain)
    # Write the predictions back into the still-missing cells of this column.
    x_missing_reg.loc[x_missing_reg.iloc[:, col].isnull(), col] = \
        forest.predict(Xtest)

# Compare model quality on four training matrices: the original data,
# mean-filled, zero-filled, and random-forest-filled. The score is the
# 5-fold cross-validated MSE of a random forest regressor (lower = better).
# (Removed the unused `std` accumulator that was never populated or read.)
X = [x_full, X_missing_mean, X_missing_0, x_missing_reg]
mse = []

for x in X:
    estimator = RandomForestRegressor(random_state=1, n_estimators=100)
    scores = cross_val_score(
        estimator, x, y_full, scoring="neg_mean_squared_error", cv=5
    ).mean()
    # sklearn reports neg_mean_squared_error as a negated loss;
    # flip the sign back so the values plot as positive MSE.
    mse.append(scores * -1)

# Plot the four MSE values as horizontal bars.
# BUG FIX: the labels must follow the order of X above —
# [x_full, X_missing_mean, X_missing_0, x_missing_reg] — i.e.
# full, MEAN-filled, ZERO-filled, RF-filled. The original label list had
# 'Zero data' and 'Mean data' swapped, mislabeling two bars.
x_labels = ['Full data', 'Mean data', 'Zero data', 'reg data']
colors = ['r', 'g', 'b', 'orange']

plt.figure(figsize=(12, 6))
ax = plt.subplot(111)
for i in np.arange(len(mse)):
    # Matplotlib's kwarg is `color` (the commented-out original used the
    # nonexistent `colors=` and would have raised).
    ax.barh(i, mse[i], color=colors[i], align='center')
ax.set_title('Imputation techniques compared (Boston housing, 5-fold CV MSE)')
# Zoom the x-axis around the observed MSE range so differences are visible.
ax.set_xlim(left=np.min(mse) * 0.9,
            right=np.max(mse) * 1.1)
ax.set_yticks(np.arange(len(mse)))
ax.set_xlabel('MSE')
ax.set_yticklabels(x_labels)
plt.show()
# The chart shows which imputation strategy yields the lowest MSE.