import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier

# Silence all warnings for cleaner console output.
# NOTE(review): this also hides sklearn convergence warnings (e.g. from
# LogisticRegression below) — confirm that is intended.
warnings.filterwarnings('ignore')



# Load the train/test splits.
# NOTE(review): relative paths assume the script is launched from a directory
# that is a sibling of data/ — confirm the expected working directory.
data = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
# Exploratory checks kept from the original author (disabled):
# data['MaritalStatus'].unique()
# data['Gender'].unique()

# Integer encodings for the raw categorical columns, one mapping per column.
# The original used a single shared dict for all four columns, which only
# worked because their value sets happen to be disjoint; per-column maps make
# cross-column code leakage impossible while producing the exact same codes.
_CATEGORY_MAPS = {
    'BusinessTravel': {'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2},
    'Department': {'Human Resources': 0, 'Research & Development': 1, 'Sales': 2},
    'MaritalStatus': {'Divorced': 0, 'Single': 1, 'Married': 2},
    'Gender': {'Male': 0, 'Female': 1},
}

# Backward-compatible merged lookup table (same content the original defined).
map_Bus = {k: v for m in _CATEGORY_MAPS.values() for k, v in m.items()}

# Replace category strings with their integer codes in both splits.
# As with the original Series.map calls, any value absent from a column's
# mapping becomes NaN.
for _df in (data, test):
    for _col, _mapping in _CATEGORY_MAPS.items():
        _df[_col] = _df[_col].map(_mapping)

# One-hot encode the remaining object columns and cast everything to int
# (get_dummies otherwise yields bool/uint8 indicator columns).
data_dum = pd.get_dummies(data).astype(int)
test_dum = pd.get_dummies(test).astype(int)
# The target column must not remain among the test features.
test_dum.drop(columns=['Attrition'], inplace=True)

# Columns the original author judged (by inspection) to carry little signal:
# presumably Over18_Y / StandardHours are near-constant and EmployeeNumber is
# an identifier — TODO confirm against the raw CSVs.
data_dum.drop(columns=['Over18_Y', 'StandardHours', 'EmployeeNumber'], inplace=True)

# ---- Feature engineering, applied identically to the train and test frames ----
# (The original duplicated every line for data_dum and test_dum, and computed
#  job_ment_sta twice under a comment claiming an "Age + TotalWorkingYears"
#  feature that was never actually implemented — the redundant recompute is
#  removed here; final frame contents are unchanged.)
# TODO(review): decide whether the intended Age/TotalWorkingYears feature
# should be added; adding it would change model inputs, so it is NOT done here.
for _df in (data_dum, test_dum):
    # Aggregate satisfaction: sum of the three satisfaction ratings.
    _df['STA'] = (_df['EnvironmentSatisfaction']
                  + _df['JobSatisfaction']
                  + _df['RelationshipSatisfaction'])
    # Job satisfaction relative to job involvement.
    # NOTE(review): assumes JobInvolvement is never 0 (the IBM attrition
    # dataset codes it 1-4) — confirm, otherwise this divides by zero.
    _df['job_ment_sta'] = _df['JobSatisfaction'] / _df['JobInvolvement']
    # Job-hopping rate: companies worked per year of experience
    # (+1 on both sides guards against zero years and zero companies).
    _df['avg_year_work'] = (_df['NumCompaniesWorked'] + 1) / (_df['TotalWorkingYears'] + 1)
    # Drop the raw columns that were folded into the engineered features above.
    _df.drop(columns=['EnvironmentSatisfaction', 'JobSatisfaction',
                      'RelationshipSatisfaction', 'NumCompaniesWorked',
                      'TotalWorkingYears'], inplace=True)

# Drop the low-signal / identifier columns from the test frame as well.
test_dum.drop(columns=['Over18_Y', 'StandardHours', 'EmployeeNumber'], inplace=True)



# Split features/target by position.
# NOTE(review): assumes 'Attrition' (the target) is the FIRST column of the
# one-hot-encoded training frame — confirm against train.csv's column order.
x_train = data_dum.iloc[:,1:]
y_train = data_dum.iloc[:,0]
x_test = test_dum.iloc[:,:]
# NOTE(review): y_test is taken as the LAST column of the *raw* test frame,
# while the target was dropped from test_dum by name above — verify both
# actually refer to the 'Attrition' column.
y_test = test.iloc[:,-1]

# Standardize: fit the scaler on the training features only, then apply the
# same transform to the test features (avoids leaking test statistics).
std = StandardScaler()
x_train = std.fit_transform(x_train)
x_test = std.transform(x_test)


# # 创建模型KNN
# model_KNN = KNeighborsClassifier()
# param_dict1 = {'n_neighbors':[i for i in range(1,10)]}
# es1 = GridSearchCV(model_KNN,param_grid=param_dict1,cv=4)
# es1.fit(x_train,y_train)
# # 预测
# y_pre1 = es1.predict(x_test)
# print('KNN')
# print(es1.best_score_)
# print(es1.best_params_)
# print(accuracy_score(y_test,y_pre1))
# print(roc_auc_score(y_test,es1.predict_proba(x_test)[:, 1]))

# Logistic-regression baseline: fit on the scaled training data, then report
# accuracy and ROC-AUC on the held-out test split.
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train)
test_pred = log_reg.predict(x_test)
print('逻辑回归')
print(accuracy_score(y_test, test_pred))
# ROC-AUC needs the positive-class probability, not the hard labels.
print(roc_auc_score(y_test, log_reg.predict_proba(x_test)[:, 1]))


# # 创建决策树模型
# model_tree = DecisionTreeClassifier()
# es3 = model_tree.fit(x_train,y_train)
# y_pre3 = es3.predict(x_test)
# print('决策树')
# print(accuracy_score(y_test,y_pre3))
# print(roc_auc_score(y_test,es3.predict_proba(x_test)[:, 1]))
#
# # 随机森林模型
# model_forest = RandomForestClassifier()
# es4 = model_forest.fit(x_train,y_train)
# y_pre4 = es4.predict(x_test)
# print('随机森林')
# print(accuracy_score(y_test,y_pre4))
# print(roc_auc_score(y_test,es4.predict_proba(x_test)[:, 1]))
#
# # GBDT模型
# model_GBDT = GradientBoostingClassifier()
# es5 = model_GBDT.fit(x_train,y_train)
# y_pre5 = es5.predict(x_test)
# print('GBDT')
# print(accuracy_score(y_test,y_pre5))
# print(roc_auc_score(y_test,es5.predict_proba(x_test)[:, 1]))
#
# # XGB模型
# model_xgb = XGBClassifier(objective='binary:logistic')
# par = {
#     'max_depth':[3,5,7],
#     'n_estimators':[100,150,200,250,300],
#     'learning_rate':[0.1,0.05,0.01]
# }
# model_cv = GridSearchCV(model_xgb,param_grid=par,cv = 5)
# es6 = model_cv.fit(x_train,y_train)
# y_pre6 = es6.predict(x_test)
# print('XGB-CV')
# print(es6.best_params_)
# print(accuracy_score(y_test,y_pre6))
# print(roc_auc_score(y_test,es6.predict_proba(x_test)[:, 1]))
