"""
Feature engineering for the Pima Indians diabetes onset dataset.

Loads the raw CSV, inspects it, flags physiologically impossible zero
values as missing, visualizes missingness against the outcome, imputes
medians, standardizes the features and writes the result back to disk.
"""
import numpy as np
import pandas as pd

# Raw string: the backslashes in this Windows path are not valid escape
# sequences, and relying on the invalid-escape fallback is a
# SyntaxWarning since Python 3.12 (scheduled to become an error).
train = pd.read_csv(r'E:\【❤】CSDN_AI_CLASS\第五周\logistic回归作业\pima-indians-diabetes .csv')
# Quick sanity checks: first rows, dtypes / non-null counts, summary stats.
print(train.head())
print(train.info())
print(train.describe())
# Columns where a value of 0 is physiologically impossible and therefore
# actually denotes a missing measurement.
NaN_col_names = ["Glucose",
                 "BloodPressure",
                 "SkinThickness",
                 "Insulin", "BMI"]
# Count how many suspicious (<= 0) entries each of these columns holds.
print((train[NaN_col_names] <= 0).sum())
# np.NaN was removed in NumPy 2.0 -- use the canonical np.nan instead.
train[NaN_col_names] = train[NaN_col_names].replace(0, np.nan)
print(train.isnull().sum())
# SkinThickness has many missing values, so instead of imputing blindly,
# add an indicator column recording whether the original value was
# missing (1) or present (0).
train['Triceps_skin_fold_thickness_Missing'] = train['SkinThickness'].apply(lambda x : 1 if pd.isnull(x) else 0)
print(train[['SkinThickness', 'Triceps_skin_fold_thickness_Missing']].head(10))

import matplotlib.pyplot as plt
import seaborn as sns

# Visualize how missingness in SkinThickness relates to the outcome.
sns.countplot(data=train, x='Triceps_skin_fold_thickness_Missing', hue='Outcome')
plt.show()

# Insulin is also heavily missing; flag whether the original value was
# absent (1) or present (0) in its own column, then plot that indicator
# against the outcome as well.
train['serum_insulin_Missing'] = train['Insulin'].apply(
    lambda value: 1 if pd.isnull(value) else 0)
sns.countplot(data=train, x='serum_insulin_Missing', hue='Outcome')
plt.show()
# The indicator columns were only needed for the plots above; remove
# them so they do not leak into the model features.
train = train.drop(columns=["Triceps_skin_fold_thickness_Missing",
                            "serum_insulin_Missing"])
# Impute the remaining missing values with each column's median.
train = train.fillna(train.median())
# Confirm that no missing values are left.
print(train.isnull().sum())
# ---- Standardization ----
# Separate the label from the features.
y_train = train['Outcome']
X_train = train.drop(["Outcome"], axis=1)
# Remember the column names so the scaled ndarray can be turned back
# into a labeled DataFrame afterwards.
feat_names = X_train.columns

from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training features and transform them to
# zero mean / unit variance.
ss_X = StandardScaler()
X_train = ss_X.fit_transform(X_train)

# Persist the engineered features together with the label as CSV.
X_train = pd.DataFrame(columns=feat_names, data=X_train)
train = pd.concat([X_train, y_train], axis=1)
# Raw string: the backslashes in this Windows path are not valid escape
# sequences, and relying on the invalid-escape fallback is a
# SyntaxWarning since Python 3.12 (scheduled to become an error).
train.to_csv(r'E:\【❤】CSDN_AI_CLASS\第五周\logistic回归作业\FE_pima-indians-diabetes.csv',
             index=False, header=True)
print(train.head())
