import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.preprocessing import Normalizer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import PCA

# Exploration target: whether the employee recently left (the "left" column)
# s1:  satisfaction_level     -- False: MinMaxScaler  True: StandardScaler
# le:  last_evaluation        -- False: MinMaxScaler  True: StandardScaler
# npr: number_project         -- False: MinMaxScaler  True: StandardScaler
# amh: average_monthly_hours  -- False: MinMaxScaler  True: StandardScaler
# tsc: time_spend_company     -- False: MinMaxScaler  True: StandardScaler
# wa:  Work_accident          -- False: MinMaxScaler  True: StandardScaler
# pl5: promotion_last_5years  -- False: MinMaxScaler  True: StandardScaler
# department and salary are discrete non-numeric columns, so they are encoded:
# dp:  department -- False: LabelEncoding  True: OneHotEncoding
# slr: salary     -- False: LabelEncoding  True: OneHotEncoding
# lower_d: whether to reduce dimensionality; ld_n: number of dimensions to keep
def hr_preprocessing(s1=False,le=False,npr=False,amh=False,tsc=False,wa=False,pl5=False,dp=False,slr=False,lower_d=False,ld_n=1,
                     filename=r'E:\pythoncode\coding-185\data\HR.csv'):
    """Load and preprocess the HR data set.

    Each boolean flag selects the scaler for one column
    (False -> MinMaxScaler, True -> StandardScaler for the numeric columns;
    False -> LabelEncoding, True -> OneHotEncoding for department/salary).

    lower_d  -- if True, reduce the features to ld_n dimensions with PCA
    ld_n     -- number of PCA components to keep
    filename -- path to the HR csv file (defaults to the original location)

    Returns (features, label): a DataFrame (or ndarray when lower_d=True)
    and the "left" target Series.
    """
    df = pd.read_csv(filename)
    # 1. Clean the data: drop rows with missing core scores, then anomalies.
    df = df.dropna(subset=["satisfaction_level", "last_evaluation"])
    # Combine both filters into one boolean mask. The original chained
    # df[cond1][cond2] indexed the *filtered* frame with a mask built from
    # the unfiltered one, which misaligns indices in modern pandas.
    df = df[(df["satisfaction_level"] <= 1) & (df["salary"] != "nme")]
    # 2. Extract the label.
    label = df["left"]
    df = df.drop("left", axis=1)  # drop by column
    # 3. Feature selection: skipped, there are few features to begin with.
    # 4. Feature scaling. satisfaction_level already lies in (0,1); it can be
    #    left alone, min-max scaled, or standardized to mean 0 / variance 1.
    scaler_flags = [s1, le, npr, amh, tsc, wa, pl5]
    numeric_cols = ["satisfaction_level", "last_evaluation", "number_project",
                    "average_monthly_hours", "time_spend_company",
                    "Work_accident", "promotion_last_5years"]
    for flag, col in zip(scaler_flags, numeric_cols):
        scaler = StandardScaler() if flag else MinMaxScaler()
        df[col] = scaler.fit_transform(df[col].values.reshape(-1, 1)).reshape(-1)
    # Encode the categorical columns.
    # LabelEncoder sorts categories alphabetically; salary needs the ordinal
    # order low -> 0, medium -> 1, high -> 2, so it is mapped explicitly
    # (unknown values fall back to 0, matching the old map_salary helper).
    salary_order = {"low": 0, "medium": 1, "high": 2}
    for flag, col in zip([dp, slr], ["department", "salary"]):
        if flag:
            # One-hot encode via pandas' built-in get_dummies.
            df = pd.get_dummies(df, columns=[col])
        else:
            if col == "salary":
                df[col] = df[col].map(salary_order).fillna(0).astype(int)
            else:
                df[col] = LabelEncoder().fit_transform(df[col])
            # Normalize to [0,1] after label encoding.
            df[col] = MinMaxScaler().fit_transform(df[col].values.reshape(-1, 1)).reshape(-1)
    if lower_d:  # dimensionality reduction requested
        return PCA(n_components=ld_n).fit_transform(df.values), label
    return df, label

# LabelEncoder encodes categories in alphabetical order; we want the ordinal
# mapping low -> 0, medium -> 1, high -> 2, so salaries are mapped by hand.
_SALARY_LEVELS = {"low": 0, "medium": 1, "high": 2}

def map_salary(s):
    """Map a salary level string to its ordinal rank; unknown values -> 0."""
    return _SALARY_LEVELS.get(s, 0)

def hr_modeling(features, label):
    """Train a small Keras neural network on the HR features and print
    ROC/AUC results for the train, validation and test splits.

    features -- DataFrame of preprocessed features (see hr_preprocessing)
    label    -- Series with the binary "left" target
    """
    from sklearn.model_selection import train_test_split  # train/test splitting
    f_v = features.values
    f_names = features.columns.values  # feature names (used by the commented tree export below)
    l_v = label.values
    # Hold out a 20% validation set first ...
    X_tt, X_validation, Y_tt, Y_validation = train_test_split(f_v, l_v, test_size=0.2)
    # ... then split the remainder 75/25 into train/test (60/20/20 overall).
    X_train, X_test, Y_train, Y_test = train_test_split(X_tt, Y_tt, test_size=0.25)
#    print(len(X_train), len(X_validation), len(X_test))

    from sklearn.metrics import accuracy_score, recall_score, f1_score  # evaluation metrics
    from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier  # KNN
    # NearestNeighbors directly returns the points closest to a query point.
    from sklearn.naive_bayes import GaussianNB, BernoulliNB
    # Naive Bayes expects discrete features. BernoulliNB suits binary (0/1)
    # features and binarizes continuous ones; GaussianNB assumes the features
    # follow a Gaussian distribution.
    from sklearn.tree import DecisionTreeClassifier  # decision tree
    from sklearn.tree import export_graphviz
    from sklearn.svm import SVC
    from sklearn.ensemble import RandomForestClassifier  # random forest
    from sklearn.ensemble import AdaBoostClassifier
    from sklearn.linear_model import LogisticRegression
    from keras.models import Sequential  # neural-network container
    # keras.layers.core was removed in Keras 2; Dense/Activation live in keras.layers.
    from keras.layers import Dense, Activation
    from keras.optimizers import SGD  # stochastic gradient descent
    from sklearn.ensemble import GradientBoostingClassifier

    # Neural network
    mdl = Sequential()  # model initialization
    mdl.add(Dense(50, input_dim=len(f_v[0])))  # hidden layer: 50 units; input_dim = #features
    mdl.add(Activation("sigmoid"))  # activation function
    mdl.add(Dense(2))  # output layer, one unit per class
    mdl.add(Activation("softmax"))  # softmax keeps the two outputs normalized
    # 'lr' was deprecated in favor of 'learning_rate' in Keras 2.3+.
    sgd = SGD(learning_rate=0.1)
    # NOTE(review): `sgd` above is built but never used -- compile() uses
    # "adam" instead; kept as-is to preserve the original training behavior.
    mdl.compile(loss="mean_squared_error", optimizer="adam")  # loss + optimizer
    # Targets must be one-hot encoded: 1 -> [0,1], 0 -> [1,0].
    # 'nb_epoch' was renamed 'epochs' in Keras 2; batch_size is the number of
    # samples drawn per gradient step.
    mdl.fit(X_train, np.array([[0, 1] if i == 1 else [1, 0] for i in Y_train]),
            epochs=10000, batch_size=8999)
    xy_lst = [(X_train, Y_train), (X_validation, Y_validation), (X_test, Y_test)]
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc, roc_auc_score
    f = plt.figure()  # one figure with a subplot per split

    for i in range(len(xy_lst)):
        X_part = xy_lst[i][0]
        Y_part = xy_lst[i][1]
#        Y_pred = mdl.predict_classes(X_part)
        # predict() returns continuous scores; predict_classes would yield
        # hard class labels instead.
        Y_pred = mdl.predict(X_part)  # approximate class probabilities
        Y_pred = np.array(Y_pred[:, 1].reshape(1, -1))[0]  # probability of the positive class
        print("Y_pred:", Y_pred)
#        print(i)
#        print("NN","-ACC:",accuracy_score(Y_part,Y_pred))
#        print("NN","-REC:",recall_score(Y_part,Y_pred))
#        print("NN","-F1:",f1_score(Y_part,Y_pred))
        f.add_subplot(1, 3, i + 1)
        fpr, tpr, threshold = roc_curve(Y_part, Y_pred)
        plt.plot(fpr, tpr)
        print("NN", "AUC", auc(fpr, tpr))
        print("NN", "AUC_Score", roc_auc_score(Y_part, Y_pred))
    plt.show()

#    import os
#    os.environ["PATH"]+=os.pathsep+r"D:\graphviz-2.38\release\bin"  # add graphviz to PATH
#    import pydotplus
#
#    models=[]
#    models.append(("KNN",KNeighborsClassifier(n_neighbors=3)))
#    models.append(("GaussianNB",GaussianNB()))
#    models.append(("BernoulliNB",BernoulliNB()))
#    models.append(("DecisionTreeGini",DecisionTreeClassifier(min_impurity_split=0.1)))
#    # Gini decision tree that stops splitting below 0.1 impurity
#    models.append(("DecisionTreeEntropy",DecisionTreeClassifier(criterion="entropy",min_samples_split=100)))
#    # information-gain decision tree, at least 100 samples per split
#    models.append(("SVM Classifier",SVC(C=1000)))
#    # C is the misclassification penalty: larger C -> higher SVM training accuracy
#    models.append(("RandomForestClassifier",RandomForestClassifier()))
#    # see the docs for RandomForestClassifier() parameters
#    models.append(("AdaBoost",AdaBoostClassifier()))
#    # see the docs for AdaBoostClassifier() parameters
#    models.append(("LogisticRegression",LogisticRegression()))
#    models.append(("GBDT",GradientBoostingClassifier(max_depth=6,n_estimators=100)))  # depth 6, 100 trees
#    for clf_name,clf in models:
#        clf.fit(X_train,Y_train)
#        xy_lst=[(X_train,Y_train),(X_validation,Y_validation),(X_test,Y_test)]
#        for i in range(len(xy_lst)):
#            X_part=xy_lst[i][0]
#            Y_part=xy_lst[i][1]
#            Y_pred=clf.predict(X_part)
#            print(i)
#            print(clf_name,"-ACC:",accuracy_score(Y_part,Y_pred))
#            print(clf_name,"-REC:",recall_score(Y_part,Y_pred))
#            print(clf_name,"-F1:",f1_score(Y_part,Y_pred))
#            dot_data=export_graphviz(clf,out_file=None,feature_names=f_names,\
#                                     class_names=["NL","L"],filled=True,\
#                                     rounded=True,special_characters=True)
            # drawing
#            graph = pydotplus.graph_from_dot_data(dot_data)
#            graph.write_pdf("dt_tree2.pdf")  # save as pdf
    # Model persistence
#    from sklearn.externals import joblib
#    joblib.dump(knn_clf,"knn_clf")  # save the model under a name
#    knn_clf=joblib.load("knn_clf")  # load the model back
    
def regr_test(features, label):
    """Fit a Lasso regression of `label` on `features` and print fit metrics
    (coefficients, MSE, MAE, R2)."""
    print("X", features)
    print("Y", label)
    from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score  # MSE MAE R2
    from sklearn.linear_model import LinearRegression, Ridge, Lasso  # Ridge = ridge regression

    X = features.values
    y = label.values
    # Alternative regressors, kept for experimentation:
#    regr = LinearRegression()
#    regr = Ridge(alpha=0.5)
    regr = Lasso(alpha=0.01)
    regr.fit(X, y)
    predictions = regr.predict(X)
    print("Coef:", regr.coef_)  # fitted coefficients
    print("MSE:", mean_squared_error(y, predictions))  # mean squared error
    print("MAE:", mean_absolute_error(y, predictions))
    print("R2:", r2_score(y, predictions))
    

def main():
    """Entry point: preprocess the HR data, then run the regression demo."""
    features, label = hr_preprocessing()
    # Regression demo: predict last_evaluation from project count and
    # average monthly hours.
    regr_test(features[["number_project", "average_monthly_hours"]],
              features["last_evaluation"])
#    hr_modeling(features,label)

if __name__ == "__main__":
    main()
    

    
    
    
    
    
    
    