import warnings
warnings.filterwarnings("ignore")
import numpy as np
import time
from pandas import read_excel
from pandas import DataFrame
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from xgboost import plot_importance
# https://www.cnblogs.com/gdjlc/p/11409804.html

# Load the raw event log.
# BUGFIX: the original passed `Sheetname=` — pandas' keyword is `sheet_name`,
# so the argument was rejected (or silently ignored on very old versions).
df = read_excel(r'C:\Users\zll\Desktop\JMS_\RawData.xlsx', sheet_name='Sheet1', header=0)
len1 = len(df)  # number of cases (rows); same value as df['Case ID'].__len__()

# Continuous attributes: start time, end time and cost for activities 1-9
# (27 columns). `.ix` was removed in pandas 1.0; plain [[...]] column
# selection is the equivalent label-based lookup here.
continuous_conds = df[['StartTime1',  'EndTime1', 'Cost1',
             'StartTime2',  'EndTime2', 'Cost2',
             'StartTime3',  'EndTime3', 'Cost3',
             'StartTime4',  'EndTime4', 'Cost4',
             'StartTime5',  'EndTime5', 'Cost5',
             'StartTime6',  'EndTime6', 'Cost6',
             'StartTime7',  'EndTime7', 'Cost7',
             'StartTime8',  'EndTime8', 'Cost8',
             'StartTime9',  'EndTime9', 'Cost9'
            ]].values

# Normalise the 27 continuous columns to numeric seconds.
# A cell may be an "HH:MM:SS" string, a plain number, or a time-like object
# with hour/minute/second attributes (presumably datetime.time from Excel —
# TODO confirm against the workbook).
continuous_res = np.zeros([len1, 27])
for i in range(len1):
    for j in range(27):
        cell = continuous_conds[i, j]
        if isinstance(cell, str):
            # Split once instead of three times per cell.
            h, m, s = (int(part) for part in cell.split(":"))
            continuous_res[i, j] = h * 3600 + m * 60 + s
        elif isinstance(cell, (int, float)):
            # isinstance (not type ==) also accepts numpy scalar subclasses,
            # which the original sent to the .hour branch by mistake.
            continuous_res[i, j] = cell
        else:
            continuous_res[i, j] = cell.hour * 3600 + cell.minute * 60 + cell.second

# Discrete (categorical) attributes per activity. The trailing comments give
# the per-column category counts and the cumulative one-hot column index.
# `.ix` was removed in pandas 1.0; [[...]] column selection is equivalent.
dispersed_conds = df[[
                           'Register_Request',  'Reason1', 'Resource1',   #1, 3, 1   5
                           'Date_Check', 'Type2', 'Resource2',            #1, 2, 1   9
                           'Mode_Audit',  'Resource3',   #1, 1   11
                           'Manual_Review',  'Resource4', #2, 2   15
                           'Reason_Review', 'Result5', 'Resource5',  #2, 3, 2   22
                           'Ticket_Check', 'Valid6', 'Resource6',    # 2, 3, 2   29
                           'Casually_Examine', 'Result7', 'Resource7', # 2, 3, 2   36
                           'Thoroughly_Examine', 'Result8', 'Resource8', # 2, 3, 2   43
                           'Decide','Resource9'  # 2,  2   47
             ]].values

results = df['Accept'].values                        # target labels
costs = df[['Cost10', 'Cost11']].values              # closing-activity costs
resources = df[['Resource10', 'Resource11']].values  # closing-activity resources

# Total cost per case: the Cost1..Cost9 columns sit at indices 2, 5, ..., 26
# of continuous_res (every third column), plus the two closing costs.
# Vectorised replacement for the original O(rows*27) Python double loop;
# identical values. astype(float) guards against `costs` arriving as an
# object-dtype array from pandas — TODO confirm the Excel columns are numeric.
cost_res = (continuous_res[:, 2::3].sum(axis=1) + costs.sum(axis=1)).astype(float)


# Integer-code the target and each categorical column, then expand the
# integer-coded categorical matrix into one-hot indicator columns.
le = LabelEncoder()
results = le.fit_transform(results)
n_cat_cols = dispersed_conds.shape[1]
for col in range(n_cat_cols):
    # Refitting the same encoder per column is fine: each column's codes
    # only need to be consistent within that column.
    dispersed_conds[:, col] = le.fit_transform(dispersed_conds[:, col])
# NOTE(review): `sparse=` was renamed `sparse_output=` in scikit-learn >= 1.2;
# kept as-is to match the sklearn version this script was written against.
oh = OneHotEncoder(sparse=False)
onehot_enc = oh.fit_transform(dispersed_conds)

# Merge columns belonging to the same activity index: interleave each
# activity's one-hot group (boundaries in arr1) with its continuous triple
# (start, end, cost), then append the trailing continuous columns (24:27).
arr1 = [5, 9, 11, 15, 22, 29, 36, 43, 47]
pieces = [onehot_enc[:, :arr1[0]]]
for k, (lo, hi) in enumerate(zip(arr1[:-1], arr1[1:])):
    pieces.append(continuous_res[:, 3 * k:3 * (k + 1)])
    pieces.append(onehot_enc[:, lo:hi])
to_traineAttrs = np.hstack(pieces)
res = np.hstack((to_traineAttrs, continuous_res[:, 24:]))

# Train an XGBoost regressor to predict total case cost, report the mean
# absolute error on a held-out split, and plot feature importances grouped
# by activity.
# imp_split: cumulative column indices in `res` separating the 9 activity
# feature groups (one-hot block + continuous triple per activity).
imp_split = [8, 15, 20, 27, 37, 47, 57, 67, 74]
for table in [res]:
    X_train, X_test, y_train, y_test = train_test_split(
        table, cost_res, random_state=0, test_size=.2)
    model = XGBRegressor()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # Mean absolute error on the 20% test split.
    mae = np.mean(np.abs(y_pred - y_test))
    print("mae is : " + str(mae))

    # Group per-feature importances by activity. Plain segment sums over
    # consecutive [lo, hi) ranges — same values as the original's
    # prefix-sum-minus-running-total trick (which also hard-coded the first
    # boundary as 8 instead of imp_split[0]), but O(n) and readable.
    fip = model.feature_importances_
    bounds = [0] + imp_split
    fips = [float(np.sum(fip[lo:hi])) for lo, hi in zip(bounds[:-1], bounds[1:])]
    fip_result = [format(x, '.2%') for x in fips]
    print("Feature importances:\n{}".format(fip_result))
    plt.bar(range(len(fips)), fips)
    plt.show()


print("done")