import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.preprocessing import Normalizer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import PCA

def hr_preprocessing(s1=False, le=False, npr=False, amh=False, tsc=False,
                     wa=False, pl5=False, dp=False, slr=False,
                     lower_d=False, ld_n=1,
                     csv_path=r'E:\pythoncode\coding-185\data\HR.csv'):
    """Load and preprocess the HR data set; target is the "left" column.

    Each boolean flag selects the scaler/encoder for one column:

    Numeric columns (False -> MinMaxScaler, True -> StandardScaler):
        s1:  satisfaction_level
        le:  last_evaluation
        npr: number_project
        amh: average_monthly_hours
        tsc: time_spend_company
        wa:  Work_accident
        pl5: promotion_last_5years

    Categorical columns (False -> LabelEncoding, True -> OneHotEncoding):
        dp:  department
        slr: salary

    lower_d: if True, reduce the features to ld_n dimensions with PCA.
    csv_path: location of the HR CSV file (added parameter; default keeps
              the original hard-coded path, so existing callers still work).

    Returns (features, label) where features is a DataFrame (or an ndarray
    when lower_d is True) and label is the "left" Series.
    """
    df = pd.read_csv(csv_path)
    # 1. Clean: drop rows with missing key values, then drop out-of-range /
    # dirty rows. Both filters are combined into ONE mask: the original
    # chained form df[mask1][mask2] built mask2 on the unfiltered frame and
    # applied it to the filtered one, which mis-aligns (and raises in recent
    # pandas) as soon as the first filter removes any row.
    df = df.dropna(subset=["satisfaction_level", "last_evaluation"])
    df = df[(df["satisfaction_level"] <= 1) & (df["salary"] != "nme")]
    # 2. Extract the label and remove it from the features.
    label = df["left"]
    df = df.drop("left", axis=1)  # axis=1: drop a column
    # 3. Feature selection: skipped, the feature set is already small.
    # 4. Scale the numeric features according to the boolean flags.
    scaler_flags = [s1, le, npr, amh, tsc, wa, pl5]
    numeric_cols = ["satisfaction_level", "last_evaluation", "number_project",
                    "average_monthly_hours", "time_spend_company",
                    "Work_accident", "promotion_last_5years"]
    for flag, col in zip(scaler_flags, numeric_cols):
        scaler = StandardScaler() if flag else MinMaxScaler()
        df[col] = scaler.fit_transform(df[col].values.reshape(-1, 1)).ravel()
    # Encode the categorical features.
    # LabelEncoder would encode salary alphabetically (high<low<medium); we
    # want the ordinal order low=0, medium=1, high=2, unknown values -> 0.
    salary_map = {"low": 0, "medium": 1, "high": 2}
    for flag, col in zip([dp, slr], ["department", "salary"]):
        if not flag:
            if col == "salary":
                df[col] = [salary_map.get(s, 0) for s in df["salary"].values]
            else:
                df[col] = LabelEncoder().fit_transform(df[col])
            # Normalize the integer codes to [0, 1] after label encoding.
            df[col] = MinMaxScaler().fit_transform(df[col].values.reshape(-1, 1)).ravel()
        else:
            # One-hot encode via pandas' built-in get_dummies.
            df = pd.get_dummies(df, columns=[col])
    if lower_d:
        # Dimensionality reduction requested: return a plain ndarray.
        return PCA(n_components=ld_n).fit_transform(df.values), label
    return df, label

# LabelEncoder encodes categories alphabetically (high=0, low=1, medium=2);
# we want the natural ordinal order low=0, medium=1, high=2, so salary is
# mapped by hand through this table instead.
d = {"low": 0, "medium": 1, "high": 2}


def map_salary(s):
    """Return the ordinal code for a salary category; unknown values map to 0."""
    return d.get(s, 0)

def hr_modeling(features, label):
    """Train several classifiers on the HR features and print accuracy,
    recall and F1 for the train / validation / test splits.

    features: DataFrame of preprocessed features (as from hr_preprocessing)
    label:    Series with the binary "left" target
    """
    from sklearn.model_selection import train_test_split
    f_v = features.values
    l_v = label.values
    # Hold out 20% as a validation set, then split the remainder 75/25 into
    # train/test — i.e. 60% / 20% / 20% of the full data overall.
    X_tt, X_validation, Y_tt, Y_validation = train_test_split(f_v, l_v, test_size=0.2)
    X_train, X_test, Y_train, Y_test = train_test_split(X_tt, Y_tt, test_size=0.25)

    from sklearn.metrics import accuracy_score, recall_score, f1_score
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.naive_bayes import GaussianNB, BernoulliNB
    # Naive Bayes notes: BernoulliNB binarizes continuous features and works
    # best when features are already binary; GaussianNB assumes the features
    # are Gaussian-distributed.
    models = [
        ("KNN", KNeighborsClassifier(n_neighbors=3)),
        ("GaussianNB", GaussianNB()),
        ("BernoulliNB", BernoulliNB()),
    ]
    splits = [("train", X_train, Y_train),
              ("validation", X_validation, Y_validation),
              ("test", X_test, Y_test)]
    for clf_name, clf in models:
        clf.fit(X_train, Y_train)
        for split_name, X_part, Y_part in splits:
            Y_pred = clf.predict(X_part)
            # Print the split name instead of the bare loop index the
            # original emitted — "train"/"validation"/"test" is readable.
            print(split_name)
            print(clf_name, "-ACC:", accuracy_score(Y_part, Y_pred))
            print(clf_name, "-REC:", recall_score(Y_part, Y_pred))
            print(clf_name, "-F1:", f1_score(Y_part, Y_pred))

    # Model persistence (sklearn.externals.joblib is deprecated — use the
    # standalone joblib package instead):
    # import joblib
    # joblib.dump(clf, "clf.pkl")   # save under a file name
    # clf = joblib.load("clf.pkl")  # load it back
    
def main():
    """Run the full pipeline: preprocess the HR data, then train and
    evaluate the models."""
    prepared = hr_preprocessing()
    hr_modeling(*prepared)

# Script entry point: run the whole pipeline only when executed directly,
# not when this module is imported.
if __name__=="__main__":
    main()
    

    
    
    
    
    
    
    