import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler as std
from sklearn.model_selection import train_test_split as tts
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from lightgbm import LGBMClassifier as lcf
from sklearn.metrics import roc_curve,roc_auc_score
'''
@Author: ma shanlin
@Functions:
    1. Load_Data: load the dataset
    2. Variance: compute the variance
    3. Standard_Deviation: compute the standard deviation
    4. Covariance: compute the covariance
    5. Coeffcient: compute the correlation coefficient
    6. Get_Feature_Label: split features and labels apart
    7. Discretization: discretize continuous data
@Parameter naming conventions:
    Something_List:
        1. Feature: feature values
        2. Label: whether the subject has the disease
        3. Classify_Machine: classification model
    p_Something: a value passed in as a parameter
    d_Something: a value read/derived as data
'''
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']    # let matplotlib render Chinese glyphs (on Windows the font name may need changing, e.g. 'SimHei')
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correctly when using a CJK font

# -------------------数据载入------------------- 
#在数据集中加入了BMI指数
def Load_Data():
    """Load the cardio dataset from ./Data.csv and clean it.

    - drops rows containing missing values
    - drops the 'id' column
    - converts 'age' from days to whole years
    - adds a 'Bmi' column: weight[kg] / (height[m])^2

    Returns:
        pd.DataFrame: the cleaned dataset.
    """
    fileLocal = './Data.csv'
    # .copy() after dropna so the column drop/assignments below operate on
    # an owned frame and never trigger pandas' SettingWithCopyWarning.
    data = pd.read_csv(fileLocal).dropna().copy()
    data.drop(['id'], axis=1, inplace=True)
    # 'age' is stored in days in the raw file; convert to rounded years.
    data['age'] = round(data['age'] / 365).astype(int)
    data['Bmi'] = data['weight'] / (np.square(data['height'] / 100))
    return data

# Module-level dataset, loaded once at import time (reads ./Data.csv);
# used by Paint_Piechart below.
DATA = Load_Data()

# ---------------------方差----------------------
'''
    @Function: compute the variance of a feature column
    @Parameters: X1: the column of feature values
'''
def Variance(X1):
    """Return the population variance of the values in X1.

    Parameters:
        X1: sequence of numeric values (list, array or pandas column).

    Returns:
        float: mean squared deviation from the exact mean.
    """
    x1_Len = len(X1)
    # Use the exact mean. The previous version rounded the mean (and printed
    # it), which biased the variance and disagreed with Covariance — that
    # inconsistency corrupted the correlation computed in Coeffcient.
    x1_Avg = sum(X1) / x1_Len
    variance_Sum = 0
    # Iterate values directly instead of positional indexing, so pandas
    # Series with non-default indices also work.
    for value in X1:
        variance_Sum += np.square(value - x1_Avg)
    return variance_Sum / x1_Len
    
# -------------------标准差-------------------
'''
    @Function: 求标准差 standard deviation  
    @Parameters: p_Variance 特征值的方差
'''
def Standard_Deviation(p_Variance):
    """Return the standard deviation corresponding to *p_Variance*.

    Works on scalars as well as NumPy arrays / pandas Series, since
    ``np.sqrt`` broadcasts elementwise.
    """
    return np.sqrt(p_Variance)

# -------------------协方差-------------------
'''
    @Function: 求两个特征值之间的协方差 Covariance
    @Parameters: x_1：特征值列表1
                 x_2：特征值列表2
'''
def Covariance(x_1, x_2):
    """Return the population covariance of two equal-length value sequences.

    Parameters:
        x_1: first sequence of numeric values.
        x_2: second sequence of numeric values, same length as x_1.

    Returns:
        float: mean of (x1_i - mean(x_1)) * (x2_i - mean(x_2)).
    """
    x1_Len = len(x_1)
    x1_Avg = sum(x_1) / x1_Len
    # BUG FIX: the original computed x2_Avg from x_1's sum and length
    # (x2_Avg = x1_Sum/x1_Len), so the second mean was simply wrong.
    x2_Avg = sum(x_2) / len(x_2)
    covariance_Sum = 0
    for i in range(x1_Len):
        covariance_Sum += (x_1[i] - x1_Avg) * (x_2[i] - x2_Avg)  # covariance formula
    return covariance_Sum / x1_Len


# -------------------相关系数-------------------
'''
    @Function: 求两个特征值的相关系数 Coeffcien
    @Parameters: x_1：特征值列表1
                 x_2：特征值列表2
'''
def Coeffcient(x_1, x_2):
    """Return the Pearson correlation coefficient of two value sequences,
    rounded to two decimal places: cov(x1, x2) / (std(x1) * std(x2)).
    """
    denominator = (Standard_Deviation(Variance(x_1))
                   * Standard_Deviation(Variance(x_2)))
    return round(Covariance(x_1, x_2) / denominator, 2)


#----------------------离散化函数---------------------------
'''
    @Function: 将某一列的数据进行离散化 Discretization
    @Parameters: p_Col:被离散化的列名
'''
def Discretization(p_Col):
    """Discretize a continuous column into 5 equal-width bins labelled 1-5.

    Parameters:
        p_Col: column of continuous values (list/array/Series).

    Returns:
        list: the bin label (1..5) for every input value.
    """
    binned = pd.cut(np.array(p_Col), 5, labels=[1, 2, 3, 4, 5])
    return binned.tolist()

#----------------------集体离散化函数-------------------------
def All_Discretization(p_Data):
    """Discretize every continuous feature column of the dataset.

    Returns the binned 'age', 'height', 'weight', 'ap_hi', 'ap_lo' and
    'Bmi' columns (in that order), each as a list of labels 1-5.
    """
    columns = ('age', 'height', 'weight', 'ap_hi', 'ap_lo', 'Bmi')
    return tuple(Discretization(p_Data[name]) for name in columns)
    
    
#---------------获取先验概率P(A)的列表----------------------------
'''
    @Function: 求某个特征值中每个取值的先验概率
    @Parameters: col: 特征值列表
'''
def Get_Previous_Percent(col):
    """Return the prior probability P(value) of each distinct value in a column.

    Parameters:
        col: pandas Series of feature values.

    Returns:
        dict: {str(value): str(count / total)}, in descending order of
        frequency (both keys and values are stringified, as the pie-chart
        code expects).
    """
    counts = col.value_counts()
    number_Array = np.array(counts)
    total = np.sum(number_Array)
    percents = {}
    for value, count in zip(np.array(counts.index), number_Array):
        percents[str(value)] = str(count / total)
    return percents
    
#--------------分离特征值和标签-------------------
'''
    @Function: 将数据集分解成特征值和标签
    @Parameters: p_Data: 数据集
'''
def Get_Feature_Label(p_Data):
    """Split the dataset into a feature matrix and the 'cardio' label column.

    Parameters:
        p_Data: full dataset including the 'cardio' column.

    Returns:
        (DataFrame, Series): features without 'cardio', and the labels.
    """
    d_Label = p_Data['cardio']
    d_Feature = p_Data.drop(columns=['cardio'])
    return d_Feature, d_Label

#-------------将数据归一化------------------------
'''
    @Function: 对数据集中的特征值进行归一化并返回归一化结果
    @Parameters: p_Data: 数据集
'''
def Get_Data_Standard(p_Data):
    """Return the dataset's features standardised (zero mean, unit variance
    per column via StandardScaler) together with the untouched labels.

    Parameters:
        p_Data: full dataset including the 'cardio' column.

    Returns:
        (ndarray, Series): scaled feature matrix and the label column.
    """
    feature, d_Label = Get_Feature_Label(p_Data)
    d_Feature = std().fit_transform(feature)
    return d_Feature, d_Label

#----------------------------训练集和测试集划分----------------------------------
'''
    @Function: 将数据集和测试集划分开
    @Parameters: p_Feature: 特征值列表
                 p_Label:标签列表
'''
def Split_Train_Test(p_Feature, p_Label):
    """Split features/labels into train and test sets (80/20, fixed seed
    so every model sees the same split).

    Returns:
        X_train, X_test, y_train, y_test
    """
    return tts(p_Feature, p_Label, test_size=0.2, random_state=1)

#--------------------获取训练后的预测模型-----------------------
'''
    @Function: 拟合最后的预测模型
    @Parameters: p_X_Train: 训练集的特征值
                 p_Y_Train: 训练集的标签
'''
def Get_Model(p_X_Train, p_Y_Train, model_name):
    """Build and fit the requested classifier.

    Parameters:
        p_X_Train: training feature matrix.
        p_Y_Train: training labels.
        model_name: one of 'KNN', 'DecisionTree', 'RandomForest', 'GBM'.

    Returns:
        the fitted model.

    Raises:
        ValueError: if model_name is not a supported name (previously an
        unknown name surfaced as a confusing UnboundLocalError on `model`).
    """
    if model_name == 'KNN':
        model = KNeighborsClassifier(n_neighbors=9, weights='distance')
    elif model_name == 'DecisionTree':
        model = DecisionTreeClassifier()
    elif model_name == 'RandomForest':
        model = RandomForestClassifier()
    elif model_name == 'GBM':
        model = lcf(num_leaves=31, learning_rate=0.07)
    else:
        raise ValueError("unknown model name: %r" % (model_name,))
    model.fit(p_X_Train, p_Y_Train)
    return model

#------------------------绘制不同特征值的百分比-----------------------------
'''
    @Function: draw a pie chart of a column's value distribution
    @Parameters: p_Variable: name of the column to plot
'''
def Paint_Piechart(p_Variable):
    """Draw a pie chart of the value distribution of column *p_Variable*
    in the module-level DATA frame.
    """
    percent_by_value = Get_Previous_Percent(DATA[p_Variable])
    plt.axes(aspect='equal')
    plt.pie(
        x=percent_by_value.values(),
        labels=percent_by_value.keys(),
        autopct='%.3f%%',
        pctdistance=0.8,
        labeldistance=1.2,
        startangle=180,
        radius=1.3,
        counterclock=False,
    )
    plt.show()
    

#--------------分别获得TP，TN，FP，FN的值----------------------
'''
    @Function: compute precision and recall from TP/TN/FP/FN counts
    @Parameters: p_Predict_List: list of predicted labels
                 p_Real_List: list of true labels
'''
def Get_Pre_Rec_Value(p_Predict_List, p_Real_List):
    """Compute precision and recall from predicted vs. true binary labels.

    Parameters:
        p_Predict_List: predicted labels (0/1).
        p_Real_List: ground-truth labels (0/1), same length.

    Returns:
        (precision, recall) as floats. Returns 0.0 for a score whose
        denominator is zero (no positive predictions / no positive truths)
        instead of raising ZeroDivisionError as the original did.
    """
    Tp = Tn = Fp = Fn = 0
    # Count the confusion-matrix cells in one pass over both lists.
    for predicted, real in zip(p_Predict_List, p_Real_List):
        if real == 1 and predicted == 1:
            Tp += 1
        elif real == 0 and predicted == 0:
            Tn += 1
        elif real == 0 and predicted == 1:
            Fp += 1
        elif real == 1 and predicted == 0:
            Fn += 1

    # (The original also computed an error rate here, but it was only used
    # by a commented-out print, so that dead code is removed.)
    d_Precision = Tp / (Tp + Fp) if (Tp + Fp) else 0.0
    d_Recall = Tp / (Tp + Fn) if (Tp + Fn) else 0.0
    return d_Precision, d_Recall

#--------------------------------对数据离散化后对四个模型集体输出预测结果-----------------------------------------
def Get_Four_Model_Dis(p_Data, p_Model_Name):
    """Discretize the continuous columns, train the requested model and
    print its precision/recall on a held-out test split.

    NOTE(review): mutates p_Data in place — the continuous columns are
    replaced by their discretized versions, so repeated calls re-bin
    already-binned data; confirm callers pass a fresh frame.

    Parameters:
        p_Data: full dataset including the 'cardio' label column.
        p_Model_Name: model key understood by Get_Model ('KNN', ...).
    """
    (p_Data['age'], p_Data['height'], p_Data['weight'],
     p_Data['ap_hi'], p_Data['ap_lo'], p_Data['Bmi']) = All_Discretization(p_Data)
    feature, label = Get_Feature_Label(p_Data)
    x_train, x_test, y_train, y_test = Split_Train_Test(feature, label)
    y_test = np.array(y_test)
    model = Get_Model(x_train, y_train, p_Model_Name)
    predict_list = model.predict(x_test)
    precison, recall = Get_Pre_Rec_Value(predict_list, y_test)
    if p_Model_Name == 'GBM':
        Paint_Roc_Curve(y_test, predict_list)
    # BUG FIX: precision/recall are fractions in [0, 1]; scale by 100 so
    # the '%%' in the message prints an actual percentage.
    print("进行离散化后，%s的准确率的得分为:%.2f%%,召回率的得分为%.2f%%" % (p_Model_Name, precison * 100, recall * 100))

#---------------------------------对数据归一化后对四个模型集体输出预测结果-----------------------------------------
def Get_Four_Model_Standard(p_Data, p_Model_Name):
    """Standardise the features, train the requested model and print its
    precision/recall on a held-out test split.

    Parameters:
        p_Data: full dataset including the 'cardio' label column.
        p_Model_Name: model key understood by Get_Model ('KNN', ...).
    """
    feature, label = Get_Data_Standard(p_Data)
    x_train, x_test, y_train, y_test = Split_Train_Test(feature, label)
    y_test = np.array(y_test)
    model = Get_Model(x_train, y_train, p_Model_Name)
    predict_list = model.predict(x_test)
    precison, recall = Get_Pre_Rec_Value(predict_list, y_test)
    if p_Model_Name == 'GBM':
        Paint_Roc_Curve(y_test, predict_list)
    # BUG FIX: precision/recall are fractions in [0, 1]; scale by 100 so
    # the '%%' in the message prints an actual percentage.
    print("进行归一化后，%s的准确率的得分为:%.2f%%,召回率的得分为%.2f%%" % (p_Model_Name, precison * 100, recall * 100))

#---------------------------------不对数据进行操作 四个模型集体输出预测结果-----------------------------------
def Get_Four_Model(p_Data, p_Model_Name):
    """Train the requested model on the raw (untransformed) features and
    print its precision/recall on a held-out test split.

    Parameters:
        p_Data: full dataset including the 'cardio' label column.
        p_Model_Name: model key understood by Get_Model ('KNN', ...).
    """
    feature, label = Get_Feature_Label(p_Data)
    x_train, x_test, y_train, y_test = Split_Train_Test(feature, label)
    y_test = np.array(y_test)
    model = Get_Model(x_train, y_train, p_Model_Name)
    predict_list = model.predict(x_test)
    precison, recall = Get_Pre_Rec_Value(predict_list, y_test)
    if p_Model_Name == 'GBM':
        Paint_Roc_Curve(y_test, predict_list)
    # BUG FIX: precision/recall are fractions in [0, 1]; scale by 100 so
    # the '%%' in the message prints an actual percentage.
    print("不进行操作，%s的准确率的得分为:%.2f%%,召回率的得分为%.2f%%" % (p_Model_Name, precison * 100, recall * 100))
    
#-----------------------------------------绘制ROC曲线--------------------------------------------------
def Paint_Roc_Curve(p_Test_List, p_Predict_List):
    """Plot the ROC curve (AUC shown in the legend) for the GBM predictions.

    NOTE(review): p_Predict_List holds hard 0/1 labels here, not scores,
    so the "curve" has a single operating point — confirm whether
    predict_proba output was intended.

    Parameters:
        p_Test_List: true binary labels.
        p_Predict_List: predicted labels.
    """
    auc = roc_auc_score(p_Test_List, p_Predict_List)
    fpr, tpr, thresholds = roc_curve(p_Test_List, p_Predict_List)
    plt.plot(fpr, tpr, color='darkorange', label='ROC curve (area = %0.2f)' % auc)
    # Diagonal reference line = performance of a random classifier.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    # Pin the axis ranges so plots are comparable between runs.
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假正率')
    plt.ylabel('真正率')
    plt.title('GBM算法的ROC曲线')
    plt.legend(loc="lower right")
    plt.show()