# -*- coding: utf-8 -*-

import pandas as pd
import numpy as np
import matplotlib.pyplot as pyplot
import seaborn as sns

# Load the diabetes dataset and print a first overview of it.
data = pd.read_csv('../diabetes.csv')

print("数据类型信息")
# Fixed: DataFrame.info() writes its report directly to stdout and returns
# None, so wrapping it in print() emitted a spurious "None" line.
data.info()

print("数据分布信息")
print(data.describe())

# NOTE(review): the commented-out per-column plotting loop that used this
# list was removed — it duplicated the all-columns loop further down and
# contained Python 2 print syntax. `column_features` itself is currently
# unused; kept in case later analysis narrows to these columns.
column_features = ["Pregnancies", "Age"]

# Plot a histogram of every column's value distribution and count the zeros
# per column (zeros often stand in for missing values in this dataset —
# TODO confirm for columns like Pregnancies where 0 is a legitimate value).
print(data.columns.values)
for column in data.columns.values:
    zero = (data[column].values == 0).sum()
    # Fixed: this was a Python 2 print statement, a SyntaxError under Python 3.
    print("【zero】统计:", column, zero)
    fig = pyplot.figure()
    # NOTE(review): sns.distplot is deprecated since seaborn 0.11;
    # sns.histplot(..., bins=20) is the modern equivalent.
    sns.distplot(data[column].values, bins=20, kde=False)
    pyplot.xlabel(column, fontsize=12)
    pyplot.show()

## Pairwise correlation (absolute values) across all columns, shown as an
## annotated heatmap.
data_corr = data.corr().abs()
pyplot.subplots(figsize=(13, 9))
sns.heatmap(data_corr, annot=True)
pyplot.show()


# Select only highly correlated attribute pairs (correlation >= threshold).
threshold = 0.5
corr_list = []
size = data_corr.shape[0]

# Scan the upper triangle (j > i) so each unordered pair is visited once.
for i in range(size):
    for j in range(i + 1, size):
        c = data_corr.iloc[i, j]  # hoisted: was looked up four times per pair
        # data_corr holds absolute correlations, so every value is in [0, 1];
        # the original's extra "c < 0 and c <= -threshold" branch was
        # unreachable and has been removed.
        if threshold <= c < 1:
            corr_list.append([c, i, j])  # store correlation and column indices

# Report the selected pairs, strongest correlation first.
s_corr_list = sorted(corr_list, key=lambda item: abs(item[0]), reverse=True)

# Resolve column indices back to column names for display.
for value, i, j in s_corr_list:
    print("%s and %s = %.2f" % (data.columns[i], data.columns[j], value))

## Feature correlations are low and every column is numeric (int64/float64),
## so the data can be standardised directly.
from sklearn.preprocessing import StandardScaler

# Scaler for the feature matrix (the "Outcome" target column is excluded).
ss_X = StandardScaler()

# Fit the scaler on the features and transform them in one step.
features = data.drop("Outcome", axis=1)
X_train = ss_X.fit_transform(features)

