import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler,StandardScaler,LabelEncoder,KBinsDiscretizer
from sklearn.impute import SimpleImputer
from scipy import stats
from numpy import nan as NA
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json as json
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor as XGBR
from xgboost import XGBRFClassifier as XGBC
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.linear_model import LinearRegression as LinearR
from sklearn.model_selection import KFold, cross_val_score as CVS, train_test_split as TTS
from sklearn.metrics import mean_squared_error as MSE
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号
import datetime
from lightgbm import LGBMRegressor as LGBR
from lightgbm import LGBMClassifier as LGBC
from sklearn.metrics import mean_squared_error as MSE
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler,StandardScaler,LabelEncoder,KBinsDiscretizer
from sklearn.impute import SimpleImputer
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
import shap
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
from sklearn.metrics import roc_curve, auc



# Load the glass-composition workbook and split the samples by glass type.
data2 = pd.read_excel(r'B22.xlsx')
data3 = data2.iloc[:, 3:]
# 类型 == 0 subset (variable name suggests 高钾 glass — TODO confirm label coding)
datagao = data3[data3['类型'] == 0]
datagao = datagao.drop(['类型'], axis=1)
# BUG FIX: the original re-used the 类型 == 0 filter here (copy-paste error),
# making dataqian identical to pre-drop datagao; the complementary class is 类型 == 1.
dataqian = data3[data3['类型'] == 1]



# Standardize all composition features and project them onto the first two
# principal components (feeds the K-means scatter plot below).
X = data3.iloc[:, :-1]   # feature columns
y = data3.iloc[:, -1]    # last column: class label

n_components = 2
X_std = StandardScaler().fit_transform(X)[:2000]
pca = PCA(n_components=n_components).fit(X_std)
Target = y[:2000]
X_5d = pca.transform(X_std)

import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls



from sklearn.cluster import KMeans

# Partition the 2-D PCA projection into two clusters
# (expected to separate weathered from unweathered glass).
kmeans = KMeans(n_clusters=2)
X_clustered = kmeans.fit_predict(X_5d)
X_clustered  # notebook-style echo of the cluster labels



# Scatter plot of the two K-means clusters in the PCA plane.
fig = go.Figure()
fig.add_trace(go.Scatter(x=X_5d[X_clustered == 0, 0], y=X_5d[X_clustered == 0, 1],
                         mode='markers',
                         name='未风化'))
fig.add_trace(go.Scatter(x=X_5d[X_clustered == 1, 0], y=X_5d[X_clustered == 1, 1],
                         mode='markers',
                         name='风化'))
fig.update_layout(
    title="Kmeans聚类风化与未风化玻璃化学成分",  # main title
    xaxis_title="PCA feature1",  # axis titles
    yaxis_title="PCA feature2",  # FIX: was misspelled "fearure2"
    font=dict(
        family="Courier New, monospace",
        size=18,
        color="#7f7f7f"
    )
)
fig.show()


# Second PCA: only the composition columns from position 3 on,
# with column 1 of X taken as the colouring target for the next plot.
XHUA = X.iloc[:, 3:]
YHUA = X.iloc[:, 1]

n_components = 2
X_std = StandardScaler().fit_transform(XHUA)[:2000]
pca = PCA(n_components=n_components).fit(X_std)
Target = YHUA[:2000]
X_5d = pca.transform(X_std)


import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls

from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Scatter plot of the two glass types (per the Target column) in the PCA plane.
fig = go.Figure()
fig.add_trace(go.Scatter(x=X_5d[Target == 1, 0], y=X_5d[Target == 1, 1],
                         mode='markers',
                         name='高钾'))
fig.add_trace(go.Scatter(x=X_5d[Target == 0, 0], y=X_5d[Target == 0, 1],
                         mode='markers',
                         name='铅钡'))
fig.update_layout(
    title="高钾与铅钡玻璃按照化学成分划分",  # main title
    xaxis_title="PCA feature1",  # axis titles
    yaxis_title="PCA feature2",  # FIX: was misspelled "fearure2"
    font=dict(
        family="Courier New, monospace",
        size=18,
        color="#7f7f7f"
    )
)
fig.show()

import numpy as np
from sklearn.feature_selection import SelectPercentile,f_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import VarianceThreshold,f_classif,mutual_info_classif as MIC

selector2= SelectPercentile(f_classif, 50)


# Univariate selection (ANOVA F-test) for 类型 == 1 glass:
# z-score the composition columns, then keep the top-50% features.
subset = data3[data3['类型'] == 1]
XHUA = subset.iloc[:, 3:-1]
y = subset['表面风化']
av = XHUA.mean(axis=0)
std = XHUA.std(axis=0)
XHUA = (XHUA - av) / std
Xnew = selector2.fit_transform(XHUA, y)  # matrix rebuilt from the selected variables
print("pvalues", selector2.pvalues_)
print("get support", selector2.get_support())
indx = np.argwhere(selector2.get_support())[:, 0]
print("选择的特征编号", indx)


from UniVarSelector import UniVarSelector

# Same selection via the project's univariate selector: keep 50% of the
# variables, return their p-values/indices, and plot per-variable scores.
univar = UniVarSelector(XHUA, y, 50)
pvalues, indx = univar.fit()
univar.plot()
print(indx)

databu = pd.read_excel(r'Bbu.xlsx')
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor

# Impute missing values feature-by-feature with a random-forest regressor:
# each feature containing NaNs becomes the regression target, and the other
# features (zero-filled) plus the original label form the design matrix.
boston_df = databu.iloc[:, :-1]
y = databu.iloc[:, -1]
X_missing_reg = boston_df.copy()
# Process features from fewest to most missing values
# (np.argsort returns positional indices in ascending order of NaN count).
sortindex = np.argsort(X_missing_reg.isnull().sum(axis=0)).values
for i in sortindex:
    df = X_missing_reg
    fillc = df.iloc[:, i]  # the feature being imputed becomes the target
    # BUG FIX: the original used `df.columns != i`, comparing column *labels*
    # with a positional index — with non-integer column names that mask is
    # all-True, so the target column leaked into its own feature matrix.
    # Use a positional mask instead.
    feature_mask = np.arange(df.shape[1]) != i
    df = pd.concat([df.iloc[:, feature_mask], pd.DataFrame(y)], axis=1)

    # Zero-fill the remaining NaNs so the forest can be fitted.
    df_0 = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(df)

    # Split rows by whether the target feature is observed.
    notnull = fillc.notnull().to_numpy()
    Ytrain = fillc[notnull]    # observed values of the target feature
    Xtrain = df_0[notnull, :]  # rows where the target feature is present
    Xtest = df_0[~notnull, :]  # rows where it is missing (to be predicted)
    if Ytrain.size == fillc.size:
        print(i, " is null")   # nothing missing in this column; skip
    else:
        rfc = RandomForestRegressor(n_estimators=100)
        rfc = rfc.fit(Xtrain, Ytrain)
        Ypredict = rfc.predict(Xtest)
        # Write the predictions back into the missing cells.
        # BUG FIX: .iloc rejects a boolean *Series* as a row mask — use the
        # plain numpy boolean array instead.
        X_missing_reg.iloc[~notnull, i] = Ypredict
# Verify that no NaNs remain.
X_missing_reg.isnull().sum()




import numpy as np
from sklearn.feature_selection import SelectPercentile,f_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import VarianceThreshold,f_classif,mutual_info_classif as MIC

selector2= SelectPercentile(f_classif, 50)


# Univariate selection (ANOVA F-test) for 类型 == 0 glass:
# z-score the composition columns, then keep the top-50% features.
subset = data3[data3['类型'] == 0]
XHUA = subset.iloc[:, 3:-1]
y = subset['表面风化']
av = XHUA.mean(axis=0)
std = XHUA.std(axis=0)
XHUA = (XHUA - av) / std
Xnew = selector2.fit_transform(XHUA, y)  # matrix rebuilt from the selected variables
print("pvalues", selector2.pvalues_)
print("get support", selector2.get_support())
indx = np.argwhere(selector2.get_support())[:, 0]
print("选择的特征编号", indx)


from UniVarSelector import UniVarSelector

# Same selection via the project's univariate selector: keep 50% of the
# variables, return their p-values/indices, and plot per-variable scores.
univar = UniVarSelector(XHUA, y, 50)
pvalues, indx = univar.fit()
univar.plot()
print(indx)

databu = pd.read_excel(r'Bbu0.xlsx')
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor

# Impute missing values feature-by-feature with a random-forest regressor:
# each feature containing NaNs becomes the regression target, and the other
# features (zero-filled) plus the original label form the design matrix.
boston_df = databu.iloc[:, :-1]
y = databu.iloc[:, -1]
X_missing_reg = boston_df.copy()
# Process features from fewest to most missing values
# (np.argsort returns positional indices in ascending order of NaN count).
sortindex = np.argsort(X_missing_reg.isnull().sum(axis=0)).values
for i in sortindex:
    df = X_missing_reg
    fillc = df.iloc[:, i]  # the feature being imputed becomes the target
    # BUG FIX: the original used `df.columns != i`, comparing column *labels*
    # with a positional index — with non-integer column names that mask is
    # all-True, so the target column leaked into its own feature matrix.
    # Use a positional mask instead.
    feature_mask = np.arange(df.shape[1]) != i
    df = pd.concat([df.iloc[:, feature_mask], pd.DataFrame(y)], axis=1)

    # Zero-fill the remaining NaNs so the forest can be fitted.
    df_0 = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(df)

    # Split rows by whether the target feature is observed.
    notnull = fillc.notnull().to_numpy()
    Ytrain = fillc[notnull]    # observed values of the target feature
    Xtrain = df_0[notnull, :]  # rows where the target feature is present
    Xtest = df_0[~notnull, :]  # rows where it is missing (to be predicted)
    if Ytrain.size == fillc.size:
        print(i, " is null")   # nothing missing in this column; skip
    else:
        rfc = RandomForestRegressor(n_estimators=100)
        rfc = rfc.fit(Xtrain, Ytrain)
        Ypredict = rfc.predict(Xtest)
        # Write the predictions back into the missing cells.
        # BUG FIX: .iloc rejects a boolean *Series* as a row mask — use the
        # plain numpy boolean array instead.
        X_missing_reg.iloc[~notnull, i] = Ypredict
# Verify that no NaNs remain.
X_missing_reg.isnull().sum()


