# -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 07:52:05 2018

@author:Devin

"""
import os
import pandas as pd
import pickle
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report  
from sklearn.model_selection import train_test_split
import copy
from collections import Counter
import sklearn
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE

# Feature columns fed to the classifier (selected in get_xy()).  Any of
# these that turn out constant in train or test are dropped by data_clean().
use_cols = ['n_power_time_012', 'n_power_time_12', 'run_time_2k', 'num_non_p',
       'battery_start', 'battery_avg', 'pm_1', 'pm_0', 'pm_2', 'pm_3',
       'run_time_1k', 'run_time_15k', 'run_time_25k', 'run_time_3k',
       'oil_non_p', 'oil_start', 'battery_11', 'battery_12', 'battery_13',
       'battery_14', '30_minutes_st', 'battery_var', 'battery_max',
       'battery_mix4', 'battery_mix1', 'battery_min', 'dr_v30_rate',
       'kurt_v_battery', 'skew_v_battery', 'avg_temperature_in',
       'avg_temperature_out', 'starting_num_day', '15_minutes_st',
       'remote_boot_times', 'x_acc', 'y_acc', 'z_acc', 'brakepos_var',
       'dr_avg_v', 'dr_avg_mile', 'dr_mile50', 'dr_v0_rate', 'dr_mile30',
       'avg_run_times', 'ac_on_count', 'ac_auto_on_count', 'fanspeed_avg',
       'fanspeed_var', 'd_temp_avg', 'c_temp_avg', 'side_light_count',
       'dip_light_count', 'main_light_count', 'wiperswitch_avg',
       'acc_seqs_min30_0_rate', 'acc_seqs_min20_0_rate',
       'acc_seqs_min10_0_rate', 't_time', 'max_speed', 'total_time_v1_rpm0',
       'total_time_v1_rpm1', 'max_vehrpm', 't_time_batt12', 't_time_batt10',
       't_time_batt14', 'purchase_time', 'oli_box_avg', 'cool_avg', 'mils',
       'batt_1', 'batt_0', 'max_seq_veh']

# NOTE: this function may eventually be removed; it loads cached pickle data.
def load_cache(cache_dir='D:\\P_accumulator\\stage_4\\stageX\\stagex\\'):
    '''
    Load the two cached pickle files produced by an earlier pipeline stage.

    :param cache_dir: directory holding the cache pickles (default keeps
                      the original hard-coded location).
    :return: (nor_list, e_list, err_e_list, err_n_list)
    '''
    # Sort for determinism: os.listdir order is arbitrary, and the
    # unpacking below depends on which file comes first.
    cache_files = sorted(os.listdir(cache_dir))
    # Open files from the SAME directory that was listed.  The original
    # listed an absolute path but opened a relative 'stagex' path, which
    # only worked when the cwd happened to match — TODO confirm layout.
    with open(os.path.join(cache_dir, cache_files[0]), 'rb') as fr:
        nor_list = pickle.load(fr)
    with open(os.path.join(cache_dir, cache_files[1]), 'rb') as fr:
        e_list, err_e_list, err_n_list = pickle.load(fr)
    return nor_list, e_list, err_e_list, err_n_list

        
def confusion_matrix_plot_matplotlib(y_truth, y_predict, cmap=plt.cm.Blues):
    '''
    Render the confusion matrix of y_truth vs. y_predict as a heat map
    with each cell annotated by its count.

    :param y_truth: ground-truth labels.
    :param y_predict: predicted labels.
    :param cmap: matplotlib colormap for the heat map.
    '''
    cm = confusion_matrix(y_truth, y_predict)
    plt.matshow(cm, cmap=cmap)  # confusion-matrix heat map
    plt.colorbar()  # colour-scale legend
    for x in range(len(cm)):  # per-cell count labels
        for y in range(len(cm)):
            # At data coordinates (x, y), x is the COLUMN and y is the ROW,
            # so the value shown there must be cm[y, x].  The original
            # annotated cm[x, y], transposing every off-diagonal label.
            plt.annotate(cm[y, x], xy=(x, y), horizontalalignment='center', verticalalignment='center')
    plt.ylabel('True label')  # axis label
    plt.xlabel('Predicted label')  # axis label
    plt.show()  # display the figure


def classification_reports(y_true, y_pred):
    """Print sklearn's per-class precision/recall/F1 report (labels on the left)."""
    header = "classification_report(left: labels):"
    report = classification_report(y_true, y_pred)
    print(header)
    print(report)


def get_xy(df):
    '''
    Split a labelled frame into features and target.

    NOTE: mutates ``df`` in place by taking the absolute value of
    ``purchase_time`` (kept for backward compatibility).

    :param df: DataFrame with a 'label' column plus the use_cols features.
    :return: (x, y) feature frame and label series.
    '''
    df['purchase_time'] = abs(df.purchase_time)
    # data_clean() may have dropped some of use_cols from df; selecting
    # only the columns still present avoids a KeyError while preserving
    # the original column order.
    present_cols = [c for c in use_cols if c in df.columns]
    x = df[present_cols]
    y = df['label']
    return x, y

def merged(wdata):
    """Concatenate a sequence of DataFrames into one frame (rows stacked)."""
    combined = pd.concat(wdata)
    return combined
    

def clean_list(ll):
    """Return only the frames that contain at least one row."""
    return [frame for frame in ll if frame.shape[0] > 0]


    
def count_01(df):
    """
    Fraction of predictions agreeing with the frame's (assumed uniform) label.

    Reads the true label from the first row of 'label'.  Returns a tuple
    (fraction_of_matching_predictions, label) for label 0 or 1; implicitly
    returns None for any other label value (original behavior preserved).
    """
    predictions = df['pre_label'].tolist()
    counts = Counter(predictions)
    true_label = df['label'].iloc[0]
    if true_label == 0:
        return counts[0] / len(predictions), 0
    if true_label == 1:
        return counts[1] / len(predictions), 1


def data_clean(df_train, df_test):
    '''
    Drop feature columns that are constant (one distinct value) in either
    the training frame or the test frame.

    (The original docstring mentioned scaler standardization, but the code
    only removes constant columns — no scaling happens here.)

    :return: (df_train, df_test, del_col) with del_col listing dropped columns.
    '''
    def _is_constant(series):
        # Counter over the raw values mirrors the original check exactly,
        # including its treatment of NaN (distinct NaN objects may count
        # as separate keys, unlike pandas nunique()).
        return len(Counter(series.tolist())) == 1

    del_col = [col for col in use_cols
               if _is_constant(df_train[col]) or _is_constant(df_test[col])]
    return df_train.drop(del_col, axis=1), df_test.drop(del_col, axis=1), del_col

def data_split(e_list, err_e_list, err_n_list, nor_list):
    '''
    Split the cached per-vehicle frames into train/test collections.

    :param e_list: per-vehicle raw frames (become the unlabeled real test set).
    :param err_e_list: per-vehicle error-period frames.
    :param err_n_list: per-vehicle normal-period frames for error vehicles.
    :param nor_list: frames for entirely normal vehicles.
    :return: (train_list, test_list, test_ll, emptylist) — labelled training
             frames, unlabeled "real" test frames, labelled test frames,
             and the indices skipped because a paired frame was empty.
    '''
    all_list = []
    emptylist = []
    # Generalized from the hard-coded `range(1717)`: iterate over however
    # many paired frames are actually present in the caches.
    n_vehicles = min(len(e_list), len(err_e_list), len(err_n_list))
    for i in range(n_vehicles):
        if err_e_list[i].shape[0] == 0 or err_n_list[i].shape[0] == 0:
            emptylist.append(i)
        else:
            all_list.append([e_list[i], err_e_list[i], err_n_list[i]])
    print('\n=============>\n'+'ok,error datas num == %d'%(len(all_list))+
          '\n=============>\n'+'start split !')
    train_l, test_l = train_test_split(all_list, train_size=0.8, test_size=0.2)
    train_nor, test_nor = train_test_split(nor_list, train_size=0.8, test_size=0.2)
    train_list = []
    test_ll = []
    for item in train_l:
        train_list.append(item[1])  # error-period frame
        train_list.append(item[2])  # normal-period frame of the same vehicle
    for item2 in test_l:
        test_ll.append(item2[1])
        test_ll.append(item2[2])
    test_ll.extend(test_nor)
    train_list.extend(train_nor)
    # Unlabeled real test set: raw frames of the error vehicles chosen for
    # test, plus deep-copied normal frames with the label column stripped
    # (deepcopy keeps the labelled originals in test_ll untouched).
    nor_no_label = copy.deepcopy(test_nor)
    nor_no = [x.drop(['label'], axis=1) for x in nor_no_label]
    test_list = [x[0] for x in test_l]
    test_list.extend(nor_no)
    return train_list, test_list, test_ll, emptylist



if __name__ == '__main__':
    # ---- load cached frames and build train / test collections ----
    nor_list, e_list, err_e_list, err_n_list = load_cache()
    train_list, real_test_list, test_list, emptylist = data_split(e_list, err_e_list, err_n_list, nor_list)
    print('\n=============>\n'+'train test data split complite!!!')
    df_train = merged(train_list)
    df_test = merged(test_list)
    df_train = shuffle(df_train)
    df_test = shuffle(df_test)
    print('\n=============>\n'+'data merge and shuffle complite!!!')
    df_train, df_test, del_col = data_clean(df_train, df_test)
    print('\n=============>\n'+'clean columns complite!!!')
    xr, yr = get_xy(df_train)
    test_xr, test_yr = get_xy(df_test)
    print('\n=============>\n'+'xy split complite!!!')

    # ---- random forest wrapped in recursive feature elimination ----
    feature_train, feature_test, target_train, target_test = train_test_split(xr, yr, train_size=0.8, test_size=0.2)
    rfc = RandomForestClassifier(n_jobs=-1, oob_score=True, n_estimators=100,
                                 class_weight={0: 1, 1: 9}, criterion='gini',
                                 max_features='auto', verbose=1)
    print('\n=============>\n'+'start RFE !!!')
    rfe = RFE(estimator=rfc, verbose=1, n_features_to_select=20, step=1)
    print('\n=============>\n'+'RFE  complete !!!')
    strct = rfe.fit(feature_train, target_train)
    print(strct)
    Y = strct.predict(feature_test)
    r_rate = strct.score(feature_test, target_test)
    print('准确率：', r_rate)
    classification_reports(target_test, Y)

    # Ground truth goes first — the original swapped the arguments,
    # which transposes the plotted confusion matrix.
    confusion_matrix_plot_matplotlib(target_test, Y, cmap=plt.cm.tab10_r)
    # Persist the FITTED selector: RFE fits an internal clone of the
    # estimator, so `rfc` itself was never trained and dumping it saved
    # an unusable model.
    joblib.dump(strct, 'rfc01.pkl')
    print('...............>> now give real test ! <<.....................')
    y_test = strct.predict(test_xr)

    classification_reports(test_yr, y_test)
    confusion_matrix_plot_matplotlib(test_yr, y_test, cmap=plt.cm.tab10_r)
    print('...............>>  real test over ! <<.....................')
    # Map each TRAINING feature name to its RFE ranking (1 = selected).
    # Zip against the actual training columns: data_clean() may have
    # dropped some of use_cols, in which case zipping against use_cols
    # would misalign names and rankings.
    imports = dict(zip(list(xr.columns), list(strct.ranking_)))
    importxx = sorted(imports.items(), key=lambda x: x[1], reverse=True)
