# -*- coding: utf-8 -*-
"""
Created on Tue Jan  9 12:45:42 2018

@author:Devin

"""
import copy
from skopt import BayesSearchCV
from sklearn.svm import SVC
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDAs
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from collections import Counter  
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LassoCV
from sklearn import preprocessing
import numpy as np
from sklearn.utils import shuffle  
from sklearn.preprocessing import Imputer
from sklearn.externals import joblib
import random
from sklearn.metrics import roc_curve, auc
import sklearn
from skopt import gp_minimize
from sklearn.pipeline import Pipeline
from skopt.space import Real, Categorical, Integer
from sklearn.ensemble import IsolationForest
from sklearn.ensemble import AdaBoostClassifier
#import  os

# Telemetry feature columns used for modelling, with the target column
# 'label' appended as the final entry (0 = normal, 1 = fault — presumably;
# TODO confirm against the data source).
use_cols = ['15_minutes_st', 'ac_auto_on_count', 'ac_on_count', 'avg_run_times',
       'avg_temperature_in', 'avg_temperature_out', 'batt_0', 'batt_1',
       'battery_avg', 'battery_max', 'battery_min', 'battery_mix1',
       'battery_mix4', 'battery_start', 'battery_var', 'brakepos_var',
       'c_temp_avg', 'cool_avg', 'd_temp_avg', 'dip_light_count',
       'dr_avg_mile', 'dr_avg_v', 'dr_mile50', 'dr_v0_rate', 'fanspeed_avg',
       'fanspeed_var', 'kurt_v_battery', 'main_light_count', 'mils',
       'n_power_time', 'num_non_p', 'oli_box_avg',
       'remote_boot_times', 'run_time_2k', 'side_light_count',
       'skew_v_battery', 'starting_num_day', 'wiperswitch_avg', 'x_acc',
       'y_acc', 'z_acc','label']

# Feature-only column list: identical to ``use_cols`` minus the target.
# Fix: derived from ``use_cols`` instead of a hand-duplicated 41-item
# literal, so the two lists can no longer drift apart.
use_colsl = [col for col in use_cols if col != 'label']


def confusion_matrix_plot_matplotlib(y_truth, y_predict, cmap=plt.cm.Blues):
    """Plot a confusion matrix image with the count written in each cell.

    :param y_truth: ground-truth labels
    :param y_predict: predicted labels
    :param cmap: matplotlib colormap for the matrix image
    """
    cm = confusion_matrix(y_truth, y_predict)
    plt.matshow(cm, cmap=cmap)  # confusion-matrix image
    plt.colorbar()  # colour scale
    for row in range(len(cm)):
        for col in range(len(cm)):
            # Fix: cm[row, col] must be drawn at (x=col, y=row) — matshow
            # puts rows on the y axis. The original used xy=(row, col),
            # which transposed every off-diagonal count.
            plt.annotate(cm[row, col], xy=(col, row),
                         horizontalalignment='center',
                         verticalalignment='center')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()


def classification_report(y_true, y_pred):
    """Print sklearn's per-class precision/recall/F1 report.

    NOTE(review): this function shadows ``sklearn.metrics.classification_report``
    at module scope; the aliased local import below keeps the call inside the
    body unambiguous.

    :param y_true: ground-truth labels
    :param y_pred: predicted labels
    """
    from sklearn.metrics import classification_report as sk_report
    print("classification_report(left: labels):")
    print(sk_report(y_true, y_pred))


def get_xy(df):
    """Split a labelled frame into (feature matrix, target vector).

    :param df: DataFrame containing every column in ``use_colsl`` plus 'label'
    :return: tuple of (features DataFrame, label Series)
    """
    return df[use_colsl], df['label']

def merged(wdata):
    """Concatenate a collection of DataFrames into one DataFrame.

    :param wdata: iterable of pandas DataFrames; must be non-empty
        (``pd.concat`` raises ValueError on an empty iterable).
    :return: single concatenated DataFrame (original row indices preserved).
    """
    return pd.concat(wdata)

def clean_list(ll):
    """Return a new list keeping only the items that have at least one row.

    :param ll: iterable of array-like objects exposing ``.shape``
        (e.g. DataFrames or ndarrays).
    :return: list with zero-row items dropped.
    """
    # Fix: comprehension replaces the manual append loop and removes the
    # stray dead `pass` statement from the original.
    return [item for item in ll if item.shape[0] > 0]


    
def count_01(df):
    """Score one group's predictions against its (single) true label.

    Assumes every row of ``df`` shares the same true label, taken from the
    first row.

    :param df: DataFrame with columns 'pre_label' (predictions) and 'label'.
    :return: (fraction of predictions equal to the true label, true label)
        for labels 0 or 1; implicitly None for any other label value
        (NOTE(review): silent fall-through kept for backward compatibility).
    """
    predictions = list(df.loc[:, 'pre_label'])
    counts = Counter(predictions)
    # Fix: look the true label up once instead of repeating the chained
    # .loc[...].iloc[0] expression in each branch.
    true_label = df.loc[:, 'label'].iloc[0]
    if true_label == 0:
        return counts[0] / len(predictions), 0
    elif true_label == 1:
        return counts[1] / len(predictions), 1

def split_data(err1, nor1, norn, num=0.75):
    """Shuffle three sample groups in place and split each into train/test.

    Each group contributes its first ``round(len * num)`` items (after an
    in-place ``random.shuffle``) to the training list and the remainder to
    the test list, preserving group order: err1, nor1, norn.

    :param err1: first group (shuffled in place — callers beware)
    :param nor1: second group (shuffled in place)
    :param norn: third group (shuffled in place)
    :param num: training fraction per group (default 0.75)
    :return: (train_list, test_list)
    """
    train_list, test_list = [], []
    for group in (err1, nor1, norn):
        random.shuffle(group)
        cut = round(len(group) * num)
        train_list.extend(group[:cut])
        test_list.extend(group[cut:])
    return train_list, test_list


def data_scaler(X_train, X_test):
    """Standardise features to zero mean and unit variance.

    The scaler is fitted on the training set only and then applied to both
    sets, so no test-set statistics leak into training.

    :param X_train: training feature matrix
    :param X_test: test feature matrix
    :return: (standardised X_train, standardised X_test, fitted scaler)
    """
    std_scaler = sklearn.preprocessing.StandardScaler()
    std_scaler.fit(X_train)
    return std_scaler.transform(X_train), std_scaler.transform(X_test), std_scaler

def data_split(e_list, err_e_list, err_n_list, nor_list):
    """Split per-vehicle frame lists into train/test collections.

    Vehicles whose error-period or normal-period frame is empty are skipped
    and their indices collected.

    :param e_list: per-vehicle full-window frames
    :param err_e_list: per-vehicle error-period frames (same index as e_list)
    :param err_n_list: per-vehicle normal-period frames (same index as e_list)
    :param nor_list: frames for healthy vehicles
    :return: (train frames, unlabelled test frames, labelled test frames,
              indices skipped because a period frame was empty)
    """
    all_list, emptylist = [], []
    # Fix: the original iterated a hard-coded range(1717); deriving the
    # bound from the inputs generalises the function to any dataset size
    # (identical behaviour when all three lists have 1717 entries).
    n_vehicles = min(len(e_list), len(err_e_list), len(err_n_list))
    for i in range(n_vehicles):
        if err_e_list[i].shape[0] == 0 or err_n_list[i].shape[0] == 0:
            emptylist.append(i)
        else:
            all_list.append([e_list[i], err_e_list[i], err_n_list[i]])
    print('ok,num == %d' % (len(all_list)))
    train_l, test_l = train_test_split(all_list, train_size=0.8, test_size=0.2)
    train_nor, test_nor = train_test_split(nor_list, train_size=0.8, test_size=0.2)
    train_list, test_ll = [], []
    for item in train_l:
        # Error-period and normal-period frames both go into training.
        train_list.append(item[1])
        train_list.append(item[2])
    for item2 in test_l:
        test_ll.append(item2[1])
        test_ll.append(item2[2])
    test_ll.extend(test_nor)
    train_list.extend(train_nor)
    # Unlabelled deep copies of the healthy test frames, mimicking unseen
    # data alongside the full-window frames of the held-out error vehicles.
    nor_no_label = copy.deepcopy(test_nor)
    nor_no = [x.drop(['label'], axis=1) for x in nor_no_label]
    test_list = [x[0] for x in test_l]
    test_list.extend(nor_no)
    return train_list, test_list, test_ll, emptylist
    
        


    
    
if __name__ == '__main__':
    # NOTE(review): the data-preparation pipeline below is commented out in
    # the original file, so feature_train / target_train / feature_test /
    # target_test and test_xr / test_yr are undefined here and this section
    # raises NameError unless they were prepared beforehand (e.g. in an
    # interactive session). Preserved as-is pending the missing data source.
#    trainx,testlist,testx,emptylist = data_split(e_list,err_e_list,err_n_list,temp_vin)
#    df_train = merged(trainx)
#    df_test = merged(testx)
#    df_train = shuffle(df_train)
#    df_test = shuffle(df_test)
#    xr,yr = get_xy(df_train)
#    test_xr,test_yr = get_xy(df_test)
#    xr = Imputer().fit_transform(xr)
#    test_xr = Imputer().fit_transform(test_xr)
#    feature_train, feature_test, target_train, target_test = train_test_split(xr,yr,train_size=0.8,test_size=0.2)

    # Random-forest classifier; class 1 (fault) is up-weighted 7:1 to
    # counter class imbalance in the training data.
    rfc = RandomForestClassifier(n_jobs=-1, oob_score=True, criterion='gini',
                                 max_features='auto', verbose=1,
                                 n_estimators=100, class_weight={0: 1, 1: 7})
    strct = rfc.fit(feature_train, target_train)
    print(strct)
    Y = rfc.predict(feature_test)

    # Evaluate accuracy on the held-out split.
    r_rate = rfc.score(feature_test, target_test)
    print('准确率：', r_rate)
    classification_report(target_test, Y)
    # Fix: ground truth first, predictions second, matching the
    # (y_truth, y_predict) signature — the original passed them swapped,
    # which mislabels the confusion-matrix axes.
    confusion_matrix_plot_matplotlib(target_test, Y, cmap=plt.cm.tab10_r)
    joblib.dump(rfc, 'rc01.pkl')

    print('...............>> now give real test ! <<.....................')
    y_test = rfc.predict(test_xr)
    classification_report(test_yr, y_test)
    # Fix: same swapped-argument correction as above.
    confusion_matrix_plot_matplotlib(test_yr, y_test, cmap=plt.cm.tab10_r)
    print('...............>>  real test over ! <<.....................')
##    rfc = joblib.load('.//model//7_rcf73-92.pkl')
##    pca = joblib.load('.//model//7_pca73-92.pkl')
#
#
#
# Rank the fitted forest's feature importances in descending order.
# Fix: guarded by __main__ so that importing this file as a module no
# longer raises NameError (``rfc`` exists only when the script ran above).
if __name__ == '__main__':
    imports = dict(zip(use_colsl, list(rfc.feature_importances_)))
    imports = sorted(imports.items(), key=lambda kv: kv[1], reverse=True)
#err_e_list = [x[use_cols] for x in err_e_list]
#err_n_list = [x[use_cols] for x in err_n_list]
#nor_list = [x[use_cols] for x in nor_list]
#err_e_l = [x[use_cols] for x in err_e_l]
#test_lists = [x[use_cols] for x in test_lists]
##resultl = copy.deepcopy(result)
#result = [x[use_cols] for x in result]
#err_e_l = copy.deepcopy(err_e_list)
#err_e_l.extend(err_e_l)
#328  2276
#e = 0 
#for i in err_e_l:
#    e+=i.shape[0]
#    print(e)
#import lime
#import lime.lime_tabular
#labelss = np.array(['normal','error'])
#model = joblib.load('rc01_500g_last.pkl')
#explainer = lime.lime_tabular.LimeTabularExplainer(xr, feature_names=use_colsl, class_names=labelss, discretize_continuous=True)
##i = np.random.randint(0, test.shape[0])
#pre = model.predict(err_n_list[0].iloc[1:2][use_colsl])
#test_x = np.array(err_e_list[2].iloc[1:2][use_colsl]).reshape(-1,)
#exp = explainer.explain_instance(test_x, model.predict_proba, num_features=5, top_labels=2)
#exp.as_pyplot_figure(1)
#ht2 = exp.as_html()
##exp.as_list()
#exp.as_map()
#exp.available_labels()
#exp.save_to_file('htn')
