import warnings
warnings.filterwarnings("ignore")
import numpy as np
from pandas import read_excel
from pandas import DataFrame
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import time
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import matplotlib.pyplot as plt

# Reference: https://www.cnblogs.com/gdjlc/p/11409804.html

# Read the event log and add an 'Order' column: the 1-based position of each
# event within its case (the counter resets whenever 'Case ID' changes).
# BUG FIX: pandas' keyword is `sheet_name`; the original `Sheetname=` is not a
# valid read_excel parameter and fails (or is ignored) depending on version.
df = read_excel(r'C:\Users\zll\Desktop\JMS_\EventLog.xlsx', sheet_name='Sheet1', header=0)
ids = df["Case ID"]
cnts = [1]  # first row is always position 1 of its case
for i in range(1, len(ids)):
    if ids[i] == ids[i-1]:
        # Same case as the previous row: continue the running count.
        cnts.append(cnts[i-1] + 1)
    else:
        # New case starts here.
        cnts.append(1)
df['Order'] = cnts

#create dict by length, divide into buckets.
# sub_tables[k] collects, for every case, the concatenated feature values of
# its first k events (a "prefix" of length k). Index 0 is unused; the fixed
# size of 8 assumes no case has more than 7 events — TODO confirm vs. the log.
sub_tables = [[] for i in range(8)]

for i in range(0, len(ids) - 1):
    # Drop the first column (Case ID) and the last two columns (one of them is
    # the freshly added 'Order'); keep the per-event feature columns.
    tmpList = df.loc[i].values.tolist()[1:-2]
    # Columns 1 and 2 appear to be pandas Timestamps; `.value` is nanoseconds
    # since the epoch, so this converts them to epoch seconds — verify dtype.
    tmpList[1] = tmpList[1].value / 1000000000
    tmpList[2] = tmpList[2].value / 1000000000
    if df['Order'][i] != 1 and df['Order'][i+1] != 1:
        # Middle event of a case: extend the case's previous prefix by this
        # event and store it in the bucket for the new prefix length.
        sub_tables[ df['Order'][i]].append(sub_tables[df['Order'][i] - 1 ][-1] + tmpList)
    elif df['Order'][i] == 1:
        # First event of a case: start a new length-1 prefix.
        sub_tables[1].append(tmpList)
    else:
        # Last event of a case (the next row starts a new case): append its
        # features in-place onto every shorter prefix of this case, presumably
        # so each stored row ends with the final event's columns — NOTE(review):
        # confirm this suffix-labelling intent.
        for j in range(1, df['Order'][i]):
            sub_tables[j][-1] += tmpList
#last line
# The loop above stops one row early because it peeks at i+1; the final row is
# necessarily the last event of its case, so handle it the same way here.
tmpList = df.loc[len(ids) - 1].values.tolist()[1:-2]
tmpList[1] = tmpList[1].value / 1000000000
tmpList[2] = tmpList[2].value / 1000000000
for i in range(1, df['Order'][len(ids) - 1]):
    sub_tables[i][-1] += tmpList


# Materialize each prefix-length bucket (lengths 1 through 7) as a NumPy array.
table1, table2, table3, table4, table5, table6, table7 = (
    np.array(sub_tables[length]) for length in range(1, 8)
)

# Integer-encode the categorical columns in every table: each event occupies a
# run of 5 columns, and offsets 0 and 3 within that run hold string labels.
le = LabelEncoder()
for tbl in (table1, table2, table3, table4, table5, table6, table7):
    width = len(tbl[0])
    for col in range(width):
        if col % 5 in (0, 3):
            tbl[:, col] = le.fit_transform(tbl[:, col])

#  one-hot encoding [example]
# for table in [table7]:
#     curTable = table.astype('float')
#     curTable = curTable.astype('int')
#     arr = []
#     for j in range(len(curTable[0]) - 5):
#         if j % 5 == 0 or j % 5 == 3:
#             arr.append(j)
#     oh = OneHotEncoder(categorical_features = arr, sparse=False)
#     oh.fit(curTable)
#     curTable = oh.transform(curTable)
    # print(curTable[0])

# lastIndex get
# 1. randomforest
# 1. randomforest
def _evaluate_model(title, auc_label, model, X_train, X_test, y_train, y_test):
    """Fit *model* on the given split, print train/test accuracy, plot the
    ROC curve, and print the AUC under *auc_label*.

    Output text is identical to the original inlined code for each model.
    ROC reference: https://www.jianshu.com/p/1da84ac7ff03
    """
    print(title + ":")
    model.fit(X_train, y_train)
    print("Accuracy on train set: {:.4f}".format(model.score(X_train, y_train)))
    print("Accuracy on test set: {:.4f}".format(model.score(X_test, y_test)))
    y_pred = model.predict(X_test)
    fpr, tpr, thresholds = roc_curve(y_test, y_pred)
    plt.plot(fpr, tpr, marker='o')
    plt.show()
    print("{} is :{:.2f}".format(auc_label, auc(fpr, tpr)))


for table in [table1, table2, table3, table4, table5, table6, table7]:
    # Keep the last event's 6 feature columns; the last of them is the target.
    curTable = table[:, -10:-4].astype('float').astype('int')
    # NOTE(review): `categorical_features` was removed from OneHotEncoder in
    # scikit-learn 0.22; this requires an older sklearn (use ColumnTransformer
    # on modern versions).
    oh = OneHotEncoder(categorical_features=[0, 3], sparse=False)
    oh.fit(curTable)
    curTable = oh.transform(curTable)
    X_train, X_test, y_train, y_test = train_test_split(
        curTable[:, :-1], curTable[:, -1], random_state=0, test_size=.2)

    print("X_train.shape: {}".format(X_train.shape))
    _evaluate_model("RandomForestClassifier", "AUC",
                    RandomForestClassifier(random_state=0),
                    X_train, X_test, y_train, y_test)
    _evaluate_model("decisionTree", "AUC1",
                    DecisionTreeClassifier(criterion='entropy', min_samples_leaf=3),
                    X_train, X_test, y_train, y_test)
    # BUG FIX: the original computed y_pred2 with tree1 (the decision tree)
    # instead of tree2 (XGBoost), so the reported AUC2/ROC belonged to the
    # wrong model. Each model now predicts with itself.
    _evaluate_model("XGBClassifier", "AUC2",
                    XGBClassifier(),
                    X_train, X_test, y_train, y_test)

# End-of-script marker.
print("end")









