import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import csv

# Path to the Pima-diabetes style dataset; expects a discrete 'Target' column.
filename=r'F:\AI\data\diabetes.csv'
df=pd.read_csv(filename)

# Distribution of the discrete target variable.
# Use the keyword form: passing a Series positionally is the deprecated
# `data=` slot in seaborn >= 0.12 and no longer works.
sns.countplot(x='Target', data=df)
plt.xlabel('target')
plt.ylabel('Number of occurrences')

# Absolute pairwise correlation between all numeric columns.
cols=df.columns
feat_corr=df.corr().abs()
plt.subplots(figsize=(12,9))
sns.heatmap(feat_corr,annot=True)
# Second pass masks everything below 1 so only perfect correlations
# (the diagonal) stay highlighted.
sns.heatmap(feat_corr,mask=feat_corr<1,cbar=False)

#Grouping example (disabled): count BMI occurrences per Target class.
#BMIDF=df.groupby(['BMI','Target'])['BMI'].count().unstack('Target').fillna(0)
#BMIDF[[0.1]].plot(kind='bar',stacked=True) #did not display

#Optional cleaning (disabled): treat zeros in these columns as missing values.
#NOTE(review): original list was missing a comma between 'serum_insulin' and 'BMI',
#which would silently concatenate the two strings.
#NaN_col_names=['Plasma_glucose_concentration','blood_pressure','Triceps_skin_fold_thickness','serum_insulin','BMI']
#df[NaN_col_names]=df[NaN_col_names].replace(0,np.NaN)
#print(df.isnull().sum())

# Separate the target from the features.
y=df['Target']
x=df.drop('Target',axis=1)
feat_names=x.columns

from sklearn.preprocessing import StandardScaler
# Standardize features and target to zero mean / unit variance.
ss_x=StandardScaler()
ss_y=StandardScaler()
x=ss_x.fit_transform(x)
# pandas Series has no .reshape(); go through the underlying numpy array.
y=ss_y.fit_transform(y.values.reshape(-1,1))
# Rebuild a labelled frame of the scaled features. Fixes the 'df.inex' typo
# and replaces the invalid pd.concat of a DataFrame with a raw ndarray
# (which raised TypeError and would have duplicated every feature column).
fe_data=pd.DataFrame(data=x,index=df.index,columns=feat_names)
fe_data['Target']=y
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=33,test_size=0.2)

from sklearn.linear_model import LinearRegression
lr=LinearRegression()
lr.fit(x_train,y_train)
y_test_pred_lr=lr.predict(x_test)
y_train_pred_lr=lr.predict(x_train)
# Feature weights. ravel() flattens the (1, n_features) coefficient matrix
# produced by the 2-D target so each row holds a scalar, not a 1-item array.
fs=pd.DataFrame({'columns':list(feat_names),'coef':list(lr.coef_.ravel())})
# sort_values returns a new frame — the original discarded the result,
# leaving fs unsorted.
fs=fs.sort_values(by=['coef'],ascending=False)
from sklearn.linear_model import RidgeCV
# Ridge with leave-one-out CV over a small alpha grid.
alphas=[0.01,0.1,1,10,100]
ridge=RidgeCV(alphas=alphas,store_cv_values=True)
ridge.fit(x_train,y_train)
# Predict with the fitted ridge model — the original reused lr by mistake,
# so the "ridge" predictions were just the LinearRegression ones.
y_test_pred_ridge=ridge.predict(x_test)
y_train_pred_ridge=ridge.predict(x_train)
from sklearn.linear_model import LassoCV
alphas=[0.01,0.1,1,10,100]
# LassoCV has no store_cv_values parameter (that is RidgeCV-only, and passing
# it raised TypeError); its per-alpha CV errors are exposed as mse_path_.
lasso=LassoCV(alphas=alphas)
# Fit the instance — the original called fit on the LassoCV class itself.
# ravel() gives the 1-D target LassoCV requires (the scaled y is (n, 1)).
lasso.fit(x_train,y_train.ravel())
# Predict with the fitted lasso model, not lr.
y_test_pred_lasso=lasso.predict(x_test)
y_train_pred_lasso=lasso.predict(x_train)
from sklearn.metrics import r2_score
# Score each model against its OWN predictions — the original evaluated the
# LinearRegression predictions three times, so all three lines printed the
# same number. Also fixes the "LinearRegresssion" typo in the message.
print('The r2 score of LinearRegression on test is',r2_score(y_test,y_test_pred_lr))
print('The r2 score of RidgeCV on test is',r2_score(y_test,y_test_pred_ridge))
print('The r2 score of LassoCV on test is',r2_score(y_test,y_test_pred_lasso))

from sklearn.model_selection import cross_val_score
# 'neg_log_loss' requires a classifier with predict_proba; lr is a
# LinearRegression (a regressor), so that scoring raises. Use negated mean
# squared error, the standard CV metric for a regressor.
loss=cross_val_score(lr,x_train,y_train,cv=5,scoring='neg_mean_squared_error')
print('MSE of each fold is:',-loss)
print('cv MSE is:',-loss.mean())

from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
# Grid-search regularization type and strength for logistic regression.
# The original grid was ['11','12'] (digit ones, not the letter l) — invalid
# penalty names that make GridSearchCV.fit raise ValueError.
penaltys=['l1','l2']
Cs=[0.1,1,10,100,1000]
tuned_parameters=dict(penalty=penaltys,C=Cs)
# liblinear is one of the few solvers that supports both l1 and l2.
lr_penalty=LogisticRegression(solver='liblinear')
grid=GridSearchCV(lr_penalty,tuned_parameters,cv=5,scoring='neg_log_loss',n_jobs=4)
grid.fit(x_train,y_train)
# best_score_ is the negated log loss; negate back for the readable value.
print(-grid.best_score_)
print(grid.best_params_)