import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns


# Path to the raw Pima Indians diabetes dataset (Windows path — adjust per machine).
filename = r'E:\images\pima-indians-diabetes.csv'
train = pd.read_csv(filename)
# A bare `train.head()` is a no-op in a script; print it so the preview is visible.
print(train.head())

print('Train：', train.shape)

# Physiological columns where a literal 0 really means "measurement missing".
# NOTE(review): the rest of this file accesses these columns by their long-form
# names (train.blood_pressure, train.serum_insulin, ...), so the long forms are
# used here for consistency — confirm against the actual CSV header.
NaN_col_names = ['Plasma_glucose_concentration', 'blood_pressure',
                 'Triceps_skin_fold_thickness', 'serum_insulin', 'BMI']
# Count the zero (i.e. effectively missing) entries per column.
print((train[NaN_col_names] == 0).sum())
# Distribution of the diabetes target label.
# Pass the column by name via keyword arguments: positional Series arguments
# to countplot were removed in seaborn 0.12.
sns.countplot(x='Target', data=train)
plt.xlabel('Diabetes')  # 0 / 1 diabetes outcome
plt.ylabel('Number of occurrences')

# Distribution of the number of pregnancies.
fig = plt.figure(figsize=(8,8))
sns.countplot(x='pregnants', data=train)
plt.xlabel('Number of pregnants')
plt.ylabel('Number of occurrences')

# Pregnancy counts split by diabetes outcome.
sns.countplot(x='pregnants', hue='Target', data=train)

# Plasma glucose concentration distribution.
fig = plt.figure(figsize=(8,8))
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.Plasma_glucose_concentration, kde=False)
plt.xlabel('Glucose')
plt.ylabel('Number of occurrences')

# Glucose vs. outcome. The y column must be the real column name: the rest of
# this file reads it as train.Plasma_glucose_concentration, not 'Glucose'.
sns.violinplot(x='Target', y='Plasma_glucose_concentration', data=train, hue='Target')


# Blood pressure distribution.
fig = plt.figure()
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.blood_pressure, kde=False)
plt.xlabel('BloodPressure')
plt.ylabel('frequency')

# Relationship between blood pressure and the target label.
sns.violinplot(x='Target', y='blood_pressure', data=train, hue='Target')
plt.xlabel('Diabetes', fontsize=12)
plt.ylabel('BloodPressure', fontsize=12)

# Triceps skin-fold thickness distribution.
fig = plt.figure()
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.Triceps_skin_fold_thickness, kde=False)
plt.xlabel('SkinThickness')
plt.ylabel('frequency')

# Skin-fold thickness vs. outcome. Use the real column name (the file reads it
# as train.Triceps_skin_fold_thickness, not 'SkinThickness').
sns.violinplot(x='Target', y='Triceps_skin_fold_thickness', data=train, hue='Target')
plt.xlabel('Diabetes', fontsize=12)
plt.ylabel('SkinThickness',fontsize=12)
plt.show()

# Post-meal serum insulin distribution.
fig = plt.figure(figsize=(8,8))
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.serum_insulin, kde=False)
plt.xlabel('Insulin')
plt.ylabel('frequency')

# Serum insulin vs. outcome. Use the real column name (the file reads it as
# train.serum_insulin, not 'Insulin').
sns.violinplot(x='Target', y='serum_insulin', data=train, hue='Target')
plt.xlabel('Diabetes', fontsize=12)
plt.ylabel('Insulin',fontsize=12)
plt.show()

# BMI distribution.
fig = plt.figure(figsize=(8,8))
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.BMI, kde=False)
plt.xlabel('BMI')
plt.ylabel('frequency')

# BMI vs. outcome.
sns.violinplot(x='Target', y='BMI', data=train, hue='Target')
plt.xlabel('Diabetes', fontsize=12)
plt.ylabel('BMI',fontsize=12)
plt.show()

# Stacked bar chart: for each distinct BMI value, how many class-0 vs class-1 rows.
BMIDF = train.groupby(['BMI','Target'])['BMI'].count().unstack('Target').fillna(0)
BMIDF[[0, 1]].plot(kind='bar', stacked=True,figsize=(15,8))

# Diabetes pedigree function (family-history score) distribution.
fig=plt.figure()
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.Diabetes_pedigree_function, kde=False)
plt.xlabel('Diabetes_pedigree_function')
plt.ylabel('frequency')

# Stacked bar chart of outcome counts per distinct pedigree value.
DF = train.groupby(['Diabetes_pedigree_function','Target'])['Diabetes_pedigree_function'].count().unstack('Target').fillna(0)
DF[[0,1]].plot(kind='bar', stacked=True)

# Age distribution.
fig=plt.figure()
# histplot replaces the deprecated (now removed) sns.distplot.
sns.histplot(train.Age, kde=False)
plt.xlabel('Age')
plt.ylabel('frequency')

# Stacked bar chart of outcome counts per distinct age.
DF = train.groupby(['Age','Target'])['Age'].count().unstack('Target').fillna(0)
DF[[0,1]].plot(kind='bar', stacked=True)

# Absolute pairwise correlations between all columns (dataset is all-numeric here).
data_corr = train.corr().abs()

plt.subplots(figsize=(13,9))
sns.heatmap(data_corr, annot=True)


# One histogram per column, shown one figure at a time.
# histplot replaces the deprecated (now removed) sns.distplot.
for feature in train.columns:
    sns.histplot(train[feature], kde=False)
    plt.show()

# Columns where a literal 0 actually encodes a missing measurement.
# NOTE(review): the long-form column names (used throughout this file for
# attribute access) are assumed here — confirm against the actual CSV header.
NaN_col_names = ['Plasma_glucose_concentration', 'blood_pressure',
                 'Triceps_skin_fold_thickness', 'serum_insulin', 'BMI']
# Use np.nan: the np.NaN alias was removed in NumPy 2.0.
train[NaN_col_names] = train[NaN_col_names].replace(0, np.nan)
print(train.isnull().sum())

# Missingness indicator for skin-fold thickness (1 = missing, 0 = present).
train['Triceps_skin_fold_thickness_Missing'] = train['Triceps_skin_fold_thickness'].isnull().astype(int)
# A bare .head(10) expression is a no-op in a script; print it.
print(train[['Triceps_skin_fold_thickness','Triceps_skin_fold_thickness_Missing']].head(10))

# Does missingness itself correlate with the diabetes outcome?
sns.countplot(x='Triceps_skin_fold_thickness_Missing', hue='Target', data = train)

# Same check for serum insulin.
train['serum_insulin_Missing'] = train['serum_insulin'].isnull().astype(int)
print(train[['serum_insulin','serum_insulin_Missing']].head(10))

sns.countplot(x='serum_insulin_Missing', hue='Target', data = train)
# The indicator columns were only for inspection — drop them again.
train.drop(['serum_insulin_Missing','Triceps_skin_fold_thickness_Missing'], axis=1, inplace=True)

# Impute the remaining NaNs with each column's median.
medians = train.median()

# fillna() returns a new DataFrame with NaNs replaced (pass inplace=True to
# modify the original in place instead).
train = train.fillna(medians)

print(train.isnull().sum())  # verify that no missing values remain

# Separate the label column from the feature matrix.
y_train = train['Target']
X_train = train.drop(['Target'], axis=1)

# Remember the feature names so the scaled ndarray can be rebuilt
# as a labelled DataFrame after standardisation.
feat_names = X_train.columns

from sklearn.preprocessing import StandardScaler

# Standardise every feature to zero mean / unit variance,
# fitting the scaler on the training data.
ss_X = StandardScaler()
X_train = pd.DataFrame(data=ss_X.fit_transform(X_train), columns=feat_names)

# Re-attach the label and persist the engineered dataset for the modelling stage.
train = pd.concat([X_train, y_train], axis=1)
train.to_csv('FE_pima-indians-diabetes_lpw.csv', index=False, header=True)





from sklearn.linear_model import LogisticRegression

from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import GridSearchCV

# Load the feature-engineered dataset produced earlier.
train = pd.read_csv('FE_pima-indians-diabetes_lpw.csv')

# Split the label off from the features.
y_train = train['Target']
X_train = train.drop(['Target'], axis=1)

# Keep the feature names around for later inspection.
feat_names = X_train.columns

# 1. Hyper-parameter search space: penalty type and inverse regularisation C.
penaltys = ['l1', 'l2']
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]

# 2. Base estimator (the liblinear solver supports both l1 and l2 penalties).
lr_penalty = LogisticRegression(solver='liblinear')

# 3. Grid search: 5-fold cross-validation scored by negative log-loss.
tuned_parameters = {'penalty': penaltys, 'C': Cs}
grid_log = GridSearchCV(lr_penalty, tuned_parameters, cv=5,
                        scoring='neg_log_loss', n_jobs=4)

# 4. Run the search.
grid_log.fit(X_train, y_train)

# Report the best score (negated back to a positive log-loss) and its parameters.
print('log损失评估的交叉验证后的最佳得分：', -grid_log.best_score_)
print('log损失评估的最佳超参数为：', grid_log.best_params_)



# Logistic Regression + GridSearchCV, this time scored by accuracy.

# 1. Hyper-parameter search space: penalty type and inverse regularisation C.
penaltys = ['l1', 'l2']
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]

# 2. Base estimator (the liblinear solver supports both l1 and l2 penalties).
lr_penalty = LogisticRegression(solver='liblinear')

# 3. Grid search: 5-fold cross-validation scored by classification accuracy.
tuned_parameters = {'penalty': penaltys, 'C': Cs}
grid_accuracy = GridSearchCV(lr_penalty, tuned_parameters, cv=5,
                             scoring='accuracy', n_jobs=4)

# 4. Run the search.
grid_accuracy.fit(X_train, y_train)

# Report the best accuracy and the winning hyper-parameters.
print('正确率评估的交叉验证后的最佳得分：', grid_accuracy.best_score_)
print('正确率的最佳超参数为：', grid_accuracy.best_params_)