import warnings
warnings.filterwarnings("ignore")
import numpy as np
import time
from pandas import read_excel
from pandas import DataFrame
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt
# https://www.cnblogs.com/gdjlc/p/11409804.html

# Load the raw event log.
# NOTE(review): the original passed `Sheetname='Sheet1'` — not a valid
# read_excel keyword (the parameter is `sheet_name`), so older pandas
# silently ignored it and read the first sheet; spell it correctly here.
df = read_excel(r'C:\Users\zll\Desktop\JMS_\RawData.xlsx', sheet_name='Sheet1', header=0)
len1 = len(df)  # number of cases (rows) in the log

# Continuous attributes: start time, end time and cost for activities 1-9
# (27 columns total). `.ix` was removed in pandas 1.0; `.loc` is the
# label-based replacement with identical semantics here.
continuous_conds = df.loc[:, ['StartTime1',  'EndTime1', 'Cost1',
             'StartTime2',  'EndTime2', 'Cost2',
             'StartTime3',  'EndTime3', 'Cost3',
             'StartTime4',  'EndTime4', 'Cost4',
             'StartTime5',  'EndTime5', 'Cost5',
             'StartTime6',  'EndTime6', 'Cost6',
             'StartTime7',  'EndTime7', 'Cost7',
             'StartTime8',  'EndTime8', 'Cost8',
             'StartTime9',  'EndTime9', 'Cost9'
            ]].values

def _clock_to_seconds(value):
    """Normalize one raw cell to a number.

    Handles three cell shapes seen in the sheet:
    - "HH:MM:SS" strings -> total seconds;
    - plain numbers (costs) -> passed through unchanged;
    - time-like objects with .hour/.minute/.second (presumably
      datetime.time produced by Excel parsing — TODO confirm) -> seconds.
    """
    if isinstance(value, str):
        h, m, s = value.split(":")  # split once instead of three times
        return int(h) * 3600 + int(m) * 60 + int(s)
    # isinstance also accepts numpy scalar types the original
    # `type(x) == int/float` comparison would have misrouted.
    if isinstance(value, (int, float, np.integer, np.floating)):
        return value
    return value.hour * 3600 + value.minute * 60 + value.second

# Convert all 27 continuous columns into a homogeneous float matrix.
continuous_res = np.zeros([len1, 27])
for i in range(len1):
    for j in range(27):
        continuous_res[i, j] = _clock_to_seconds(continuous_conds[i, j])

# Categorical (discrete) attributes per activity. The trailing comments give
# the per-column category counts and the running one-hot column offset.
# `.ix` was removed in pandas 1.0; `.loc` is the label-based replacement.
dispersed_conds = df.loc[:,[
                           'Register_Request',  'Reason1', 'Resource1',   #1, 3, 1   5
                           'Date_Check', 'Type2', 'Resource2',            #1, 2, 1   9
                           'Mode_Audit',  'Resource3',   #1,  1   11
                           'Manual_Review',  'Resource4', #2,  2   15
                           'Reason_Review', 'Result5', 'Resource5',  #2, 3, 2   22
                           'Ticket_Check', 'Valid6', 'Resource6',    # 2, 3, 2   29
                           'Casually_Examine', 'Result7', 'Resource7', # 2, 3, 2   37
                           'Thoroughly_Examine', 'Result8', 'Resource8', # 2, 3, 2   43
                           'Decide',  'Resource9'  # 2,  2   47
             ]].values

# Classification target plus the final two activities' costs and resources.
results  = df['Accept'].values
costs =  df.loc[:,['Cost10', 'Cost11']].values
resources = df.loc[:,['Resource10', 'Resource11']].values

# Fold the two terminal activities into one feature pair: their summed cost,
# and the resource that actually acted (Resource11 unless it is the literal
# string "None", in which case Resource10 is kept).
for row in range(len1):
    costs[row, 0] = costs[row, 0] + costs[row, 1]
    if resources[row, 1] != "None":
        resources[row, 0] = resources[row, 1]

le = LabelEncoder()
# NOTE(review): `sparse=` was renamed `sparse_output=` in scikit-learn 1.2
# and removed in 1.4 — switch when the environment's sklearn is upgraded.
oh = OneHotEncoder(sparse=False)

# Integer-encode the target, then integer-encode every categorical column
# in place (re-fitting the same LabelEncoder per column is fine: only the
# transformed codes are used). Finally one-hot encode the whole matrix.
results = le.fit_transform(results)
for col in range(dispersed_conds.shape[1]):
    dispersed_conds[:, col] = le.fit_transform(dispersed_conds[:, col])
onehot_enc = oh.fit_transform(dispersed_conds)

# Interleave each activity's continuous features (3 columns) with its one-hot
# categorical features; arr1 holds the cumulative one-hot column offsets per
# activity. Collect segments and concatenate once instead of growing the
# array with repeated hstack calls (same result, fewer copies).
arr1 = [5, 9, 11, 15, 22, 29, 36, 43, 47]
segments = [onehot_enc[:, :arr1[0]]]
for k in range(1, len(arr1)):
    segments.append(continuous_res[:, 3 * (k - 1):3 * k])
    segments.append(onehot_enc[:, arr1[k - 1]:arr1[k]])
to_traineAttrs = np.hstack(segments)
res = np.hstack((to_traineAttrs, continuous_res[:, 24:]))

# Progressively wider feature prefixes: each table adds the columns of the
# next activity / activities (prefix widths 8, 15, 20, 37).
table1, table2, table3, table4 = (res[:, :w].copy() for w in (8, 15, 20, 37))

# Keep only the cases whose column 9 of the continuous matrix is zero
# (presumably StartTime4, i.e. activity 4 never ran — TODO confirm), together
# with their labels.
kept_rows = []
kept_labels = []
for idx in range(len(res)):
    if continuous_res[idx, 9] == 0:
        kept_rows.append(res[idx])
        kept_labels.append(results[idx])
res1 = np.array(kept_rows)
results1 = np.array(kept_labels)
# Pre-drop column layout per activity (offsets from arr1 = [5, 9, 11, 15, 22, 29, 36, 43, 47]):
#    A      B      C      D      E      H      F      G      I
# [0-8,  8-15, 15-20, 20-27, 27-37, 37-47, 47-57, 57-67, 67-74]
# Drop activity D's columns (20-27) since it never ran for these cases.
res1 = np.hstack((res1[:, :20], res1[:, 27:]))
table5 = res1[:, :60].copy()
# For each case keep only one of two competing feature groups: when the sum
# of columns 47 and 57 is smaller than column 37, zero out the 37-39 group;
# otherwise zero out both the 47-49 and 57-59 groups.
for r in range(len(table5)):
    if table5[r, 57] + table5[r, 47] < table5[r, 37]:
        table5[r, 37:40] = 0
    else:
        table5[r, 47:50] = 0
        table5[r, 57:60] = 0
# table6: same 60-column prefix without the zeroing; table7: all columns.
table6 = res1[:, :60].copy()
table7 = res1.copy()

def _fit_and_report(model, features, labels):
    """Train `model` on an 80/20 split of (features, labels) and report.

    Prints train/test accuracy, shows the ROC plot, and prints the AUC —
    the exact same lines the original repeated for every model/table pair.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, random_state=0, test_size=.2)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print("Accuracy on train set: {:.4f}".format(model.score(X_train, y_train)))
    print("Accuracy on test set: {:.4f}".format(model.score(X_test, y_test)))
    accuracy = accuracy_score(y_test, y_pred)
    print("accuarcy: %.2f%%" % (accuracy * 100.0))
    # NOTE(review): ROC built from hard 0/1 predictions, not probability
    # scores, yields a degenerate two-point curve; consider predict_proba.
    fpr, tpr, thresholds = roc_curve(y_test, y_pred)
    # draw the ROC curve
    plt.plot(fpr, tpr, marker='o')
    plt.show()
    AUC = auc(fpr, tpr)
    print("AUC is :{:.2f}".format(AUC))


# Full-log tables are scored against `results`; the filtered tables
# (activity 4 absent) against `results1`.
full_tables = [table1, table2, table3, table4]
reduced_tables = [table5, table6, table7]

# decisionTree
print("decisionTree:")
for table in full_tables:
    _fit_and_report(DecisionTreeClassifier(criterion='entropy', min_samples_leaf=3),
                    table, results)
for table in reduced_tables:
    _fit_and_report(DecisionTreeClassifier(criterion='entropy', min_samples_leaf=3),
                    table, results1)

# random forest
print("randomoreset")
for table in full_tables:
    _fit_and_report(RandomForestClassifier(random_state=0), table, results)
for table in reduced_tables:
    _fit_and_report(RandomForestClassifier(random_state=0), table, results1)

# xgboost
print("xgboost")
# https://www.cnblogs.com/wanglei5205/p/8578486.html
for table in full_tables:
    _fit_and_report(XGBClassifier(), table, results)
for table in reduced_tables:
    _fit_and_report(XGBClassifier(), table, results1)

print("done")