# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 16:56:58 2018

@author:Devin

"""
import pandas as pd
import os
import numpy as np
from tqdm import tqdm
from mix_class import Construction_data
from sklearn.preprocessing import Imputer
from sklearn.externals import joblib
from collections import Counter
import copy
from sklearn.metrics import confusion_matrix
import math
import matplotlib.pyplot as plt
import hashlib
import pickle
import json

# =============================================================================
# Define a class for battery error-time detection
# =============================================================================
class battery_time_detect():
    '''
    Battery error-time detection using an already trained 2-class model
    (default: random forest).

    Two key inputs are required: the data and the model path.

    Parameters
    ----------
    df : list of DataFrame
        Elements are strongly recommended to be DataFrames; if they are not,
        supply ``col`` and ``indexs``.
    model_path : str
        Directory holding the saved model (default pkl format); the first
        file in the directory is loaded.
    base_sort_path : str
        Base path holding the pickled per-day reference ("base") dump.
    result_path : str
        Directory where the JSON result dict is written.
    col : optional
        Column names when the data elements are not DataFrames.
    indexs : optional
        Index when the data elements are not DataFrames.
    lap : int
        Prediction window length; 0/1 labels are counted inside this window.
    gap : int
        Sliding-window step; defaults to ``lap`` so windows do not overlap.
    pp : float
        0/1 threshold inside a window: a window counts as faulty when the
        share of 1-labels is >= pp (default 10%).
    min_pre_gap : int
        Minimum allowed difference between the predicted time (taken from the
        window) and the real failure time.
    max_pre_gap : int
        Maximum allowed difference between the predicted time and the real
        failure time.
    periods : str
        pandas period frequency (default 'D', daily).
    '''
    def __init__(self, df, model_path, base_sort_path, result_path='predict_result/',
                 col=None, indexs=None, lap=10, gap=10, pp=0.1,
                 min_pre_gap=0, max_pre_gap=90, periods='D'):
        # Fail fast instead of printing and returning early: the original
        # early return left a half-initialized object whose later attribute
        # accesses failed with confusing AttributeErrors.
        if not os.path.exists(model_path):
            raise FileNotFoundError('model path not exist : %s' % model_path)
        model_files = os.listdir(model_path)
        if not model_files:
            raise FileNotFoundError('model path is empty : %s' % model_path)
        model_paths = os.path.join(model_path, model_files[0])
        if not isinstance(df, list):
            raise TypeError('df should be a list which element is df !')
        if not os.path.exists(result_path):
            os.makedirs(result_path)

        self.df_list = df
        self.col = col
        self.indexs = indexs
        self.lap = lap
        self.gap = gap
        self.pp = pp
        self.min_pre_gap = min_pre_gap
        self.max_pre_gap = max_pre_gap
        self.periods = periods
        self.model = joblib.load(model_paths)
        self.cache_root_dir = result_path
        self.base_sort_path = base_sort_path
        # Feature columns the model was trained on; order must match training.
        self.use_colsl = ['15_minutes_st', 'ac_auto_on_count', 'ac_on_count', 'avg_run_times',
                           'avg_temperature_in', 'avg_temperature_out', 'batt_0', 'batt_1',
                           'battery_avg', 'battery_max', 'battery_min', 'battery_mix1',
                           'battery_mix4', 'battery_start', 'battery_var', 'brakepos_var',
                           'c_temp_avg', 'cool_avg', 'd_temp_avg', 'dip_light_count',
                           'dr_avg_mile', 'dr_avg_v', 'dr_mile50', 'dr_v0_rate', 'fanspeed_avg',
                           'fanspeed_var', 'kurt_v_battery', 'main_light_count', 'mils',
                           'n_power_time', 'num_non_p', 'oli_box_avg',
                           'remote_boot_times', 'run_time_2k', 'side_light_count',
                           'skew_v_battery', 'starting_num_day', 'wiperswitch_avg', 'x_acc',
                           'y_acc', 'z_acc']
        self.base = self._load_base()
        # NOTE(review): _combin_data is an unimplemented stub and returns
        # None — confirm whether aggregation is still meant to happen here.
        self.temp_data_list = self._combin_data()
        # Alias kept so callers can trigger the run via `instance.state()`.
        self.state = self.final_test

    def _md5(self, s):
        """Return the hex MD5 digest of the UTF-8 encoding of *s*."""
        m = hashlib.md5()
        m.update(s.encode("utf8"))
        return m.hexdigest()

    def _load_base(self):
        """Load the pickled reference ("base") object from base_sort_path.

        NOTE(review): pickle.load on an external file is unsafe for
        untrusted input — confirm the dump is produced in-house.
        """
        path = os.path.join(self.base_sort_path, '%s.dump' % self._md5('base'))
        with open(path, 'rb') as fr:
            base = pickle.load(fr)
        return base

    def _combin_data(self):
        '''
        Data extraction / aggregation placeholder.

        Not implemented yet; currently returns None.
        '''

    def _get_moving_data(self, window_dataframe):
        """Split *window_dataframe* into sliding windows of self.lap rows,
        stepping self.gap rows; windows shorter than self.lap are dropped."""
        rows = window_dataframe.shape[0]
        window_num = math.floor(rows / self.gap)
        index = 0
        result_list = []
        for i in range(window_num):
            tran_window_data = window_dataframe.iloc[index:index + self.lap, :]
            if tran_window_data.shape[0] == self.lap:
                result_list.append(tran_window_data)
            index += self.gap
            if index > rows:
                break
        return result_list

    def _count_01(self, df):
        """Return 1 when the share of 1s in df['pre_label'] is >= self.pp,
        else 0."""
        pre_list = list(df.loc[:, 'pre_label'])
        values_counts = Counter(pre_list)
        return 1 if (values_counts[1] / len(pre_list)) >= self.pp else 0

    def _pre_df(self, df):
        """Scan *df* window by window.

        Returns (flag, index): flag is 1 as soon as some window is faulty
        (scan stops there), otherwise 0; index is the first row index of the
        window that decided the result.
        """
        if df.shape[0] <= self.lap:
            return self._count_01(df), df.index[0]
        # Bug fix: initialise the result before the loop so an empty window
        # list (possible when self.gap > number of rows) no longer raises
        # UnboundLocalError.
        flag, when = 0, df.index[0]
        for window in self._get_moving_data(df):
            flag = self._count_01(window)
            when = window.index[0]
            if flag == 1:
                break
        return flag, when

    def _distence(self, points):
        """Return {base key: normalized similarity} for the mean of *points*.

        Similarity to a base member x is 1 / (1 + |mean(points) - x|); each
        key is scored by its best member, then scores are normalized to sum
        to 1.
        """
        distence_list = []
        r_dict = {}
        center = np.mean(points)  # hoisted: loop-invariant
        for i in self.base:
            sim = [1.0 / (1.0 + (np.linalg.norm(center - x))) for x in self.base[i]]
            distence_list.append([i, max(sim)])
        sums = sum(z[1] for z in distence_list)
        for x in distence_list:
            r_dict[x[0]] = x[1] / sums
        return r_dict

    def final_test(self):
        """Predict per-row labels for every frame in self.df_list, flag
        vehicles whose windows look faulty, and write
        {vin: {time: bucket probabilities}} to record.json.

        Returns 1 on completion.
        """
        result = {}
        for item in self.df_list:
            pre = list(self.model.predict(item.loc[:, self.use_colsl]))
            tem = copy.deepcopy(item)  # keep the caller's frame untouched
            tem['pre_label'] = pre
            v, t = self._pre_df(tem)
            if v == 1:
                pvin = item.vin[0]
                p_result = self._distence(item)
                result[pvin] = {str(t): p_result}
        with open(os.path.join(self.cache_root_dir, "record.json"), "w") as f:
            json.dump(result, f)
        return 1
        











# =============================================================================
# read_test_data
# =============================================================================
# NOTE(review): `testlist` is not defined anywhere in this file — this
# section presumably runs in an interactive session where it was built
# earlier (see the commented-out read_test call in __main__); confirm
# before running the file top to bottom.
test = battery_time_detect(testlist,'model','base')
# NOTE(review): the constructor only aliases final_test to self.state and
# does not call it, so record.json may not exist yet here — confirm that
# final_test is run before this read.
with open("predict_result/record.json",'r') as load_f:
    load_dict = json.load(load_f)

# =============================================================================
# report
# =============================================================================
def confusion_matrix_plot_matplotlib(y_truth, y_predict, cmap=plt.cm.Blues):
    """Plot the confusion matrix of *y_truth* vs *y_predict* with matplotlib.

    y_truth:   true labels.
    y_predict: predicted labels.
    cmap:      matplotlib colormap used for the matrix image.
    """
    cm = confusion_matrix(y_truth, y_predict)
    plt.matshow(cm, cmap=cmap)  # confusion-matrix image
    plt.colorbar()  # color scale
    for row in range(len(cm)):  # value labels on each cell
        for col in range(len(cm)):
            # Bug fix: annotate's xy is (x, y) = (column, row), so cm[row, col]
            # must be placed at (col, row); the original plotted every value
            # transposed for asymmetric matrices.
            plt.annotate(cm[row, col], xy=(col, row),
                         horizontalalignment='center', verticalalignment='center')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()  # display the figure


def classification_report(y_true, y_pred):
    """Print sklearn's classification report for *y_true* / *y_pred*."""
    # Aliased local import so this wrapper does not shadow the sklearn
    # function it delegates to.
    from sklearn.metrics import classification_report as sk_report
    print ("classification_report(left: labels):")
    print (sk_report(y_true, y_pred))

# =============================================================================
# count
# =============================================================================
def get_moving_data(window_dataframe, gap, lap):
    """Split *window_dataframe* into sliding windows of *lap* rows, stepping
    *gap* rows at a time; windows shorter than *lap* rows are discarded."""
    total_rows = window_dataframe.shape[0]
    windows = []
    start = 0
    for _ in range(total_rows // gap):
        chunk = window_dataframe.iloc[start:start + lap, :]
        if len(chunk) == lap:
            windows.append(chunk)
        start += gap
        if start > total_rows:
            break
    return windows

def count_01(df, pp):
    """Return 1 when the share of 1s in df['pre_label'] is >= *pp*, else 0.

    Robustness fix: an empty frame now returns 0 instead of raising
    ZeroDivisionError.
    """
    pre_list = list(df.loc[:, 'pre_label'])
    if not pre_list:
        return 0
    values_counts = Counter(pre_list)
    return 1 if values_counts[1] / len(pre_list) >= pp else 0


def pre_df(df, pp, lap, gap):
    """Scan *df* window by window and return (flag, index).

    flag is 1 as soon as some window's 1-share is >= *pp* (the scan stops
    there), otherwise 0; index is the first row index of the window that
    decided the result.

    Bug fix: `v` and `item` were unbound when get_moving_data returned no
    windows (possible when gap > number of rows); the result now defaults
    to (0, df.index[0]) in that case.
    """
    if df.shape[0] <= lap:
        return count_01(df, pp), df.index[0]
    flag, when = 0, df.index[0]
    for window in get_moving_data(df, gap, lap):
        flag = count_01(window, pp)
        when = window.index[0]
        if flag == 1:
            break
    return flag, when
    
            




def final_test(test_list,v_err,vin_nor,periods = 'D',lap=10,gap=1,pp=0.2,min_pre_gap=0,max_pre_gap=90):
    '''
    Score every frame in *test_list* with the trained model, compare the
    windowed prediction against the known failure times, and print/plot a
    classification report.

    test_list:   list of per-vehicle DataFrames to score.
    v_err:       (vin, failure_time) pairs for known-faulty vehicles.
    vin_nor:     collection of VINs known to be normal.
    periods:     pandas period frequency for the time comparisons (default 'D').
    lap:         prediction window length; 0/1 labels are counted inside it.
    gap:         sliding-window step; default 1 to cover as many windows as possible.
    pp:          0/1 threshold inside a window; a window counts as faulty when
                 the share of 1s is >= pp.
    min_pre_gap: minimum difference between the predicted time (taken from the
                 window) and the real failure time for a prediction to count.
    max_pre_gap: maximum difference between the predicted time and the real
                 failure time for a prediction to count.

    NOTE(review): relies on module-level `model_path` and `use_colsl`, neither
    of which is defined in this file — confirm they exist in the session
    before calling. Also assumes Period arithmetic
    (pd.to_datetime(...).to_period(periods) - t) is comparable with plain
    ints, which depends on the pandas version — verify.
    '''
    # Accumulators: p/r are predicted/real label lists for the report;
    # hit*/p*r* collect the individual cases; s15/s1530/s3060/s6080 bucket
    # true positives by how far ahead of the failure they were predicted.
    model = joblib.load(model_path);dictx = dict(v_err);x10=[];nor = 1;err=1
    result_df = [];p=[];r=[];hit_1=[];p1r0=[];p0r1=[];hit_1_all=[];hit_0_all=[];hit11 = [];p11r00=[]
    s15 = [];s1530=[];s3060=[];s6080=[];s80 = [];hit_0=[]
    for item in tqdm(test_list):
        pre = list(model.predict(item.loc[:,use_colsl]))
        tem = copy.deepcopy(item);tem['pre_label'] = pre;result_df.append(tem)
        v,t = pre_df(tem,pp,lap,gap)
        pvin = item.vin[0]
        if pvin in vin_nor:
            # Known-normal vehicle: any faulty flag is a false positive.
            nor+=1
            hit_0_all.append([pvin,t,item])
            if v == 0:
                r.append(0);p.append(0)
                s80.append(pvin)
            elif v == 1:
                r.append(0);p.append(1)
                p11r00.append([[pvin,t]])
        elif pvin in dictx:
            # Known-faulty vehicle: check whether the predicted time falls
            # inside the allowed lead-time window before the real failure.
            err+=1
            hit_1_all.append([pvin,t,dictx[pvin]])
            if v == 1:
                hit_1.append([pvin,t,dictx[pvin]])
                if min_pre_gap < pd.to_datetime(dictx[pvin]).to_period(periods)-t <= max_pre_gap:
                    r.append(1);p.append(1)
                    hit11.append([[pvin,t,dictx[pvin]]])
                    # Bucket the true positive by its lead time (days).
                    if min_pre_gap < pd.to_datetime(dictx[pvin]).to_period(periods)-t <=15:
                        s15.append(item)
                    elif 15 < pd.to_datetime(dictx[pvin]).to_period(periods)-t <=30:
                        s1530.append(item)
                    elif 30 < pd.to_datetime(dictx[pvin]).to_period(periods)-t <=60:
                        s3060.append(item)
                    elif 60 < pd.to_datetime(dictx[pvin]).to_period(periods)-t <=80:
                        s6080.append(item)
                    elif 80 < pd.to_datetime(dictx[pvin]).to_period(periods)-t:
#                        s80.append(item)
                        pass
                else:
                    # Predicted faulty but outside the allowed lead window.
                    r.append(0);p.append(1)
                    p1r0.append([[pvin,t,dictx[pvin]]])
            if v == 0:
                hit_0.append([pvin,t,dictx[pvin]])
                # A miss more than 100 periods before the failure is treated
                # as a correct "normal" call; anything closer is a miss.
                if 100 < pd.to_datetime(dictx[pvin]).to_period(periods)-t:
                    r.append(0);p.append(0)
#                elif item.index[0]-pd.to_datetime(dictx[pvin]).to_period(periods)>0:
#                    r.append(0);p.append(0)
#                elif min_pre_gap < t - pd.to_datetime(dictx[pvin]).to_period(periods) <= max_pre_gap:
#                    r.append(1);p.append(0)
#                    p0r1.append([[pvin,t,dictx[pvin]]])
                else:
                    r.append(1);p.append(0)
                    p0r1.append([[pvin,t,dictx[pvin]]])
        else:
            print('....>>>> the %s not in base_line !!!'%(pvin))
    classification_report(r,p)
    confusion_matrix_plot_matplotlib(p,r,cmap=plt.cm.tab10_r)
    return result_df,p,r,hit_1,p1r0,p0r1,hit_1_all,hit_0_all,hit11,x10,err,nor,p11r00,s15,s1530,s3060,s6080,s80,hit_0

def point(ss):
    """Collapse each element of *ss* to its scalar mean."""
    return [np.mean(member) for member in ss]
def fuck():
    """Build the lead-time reference dict {bucket label: mean points} from
    the module-level s15/s1530/s3060/s6080 true-positive buckets."""
    buckets = ((15, s15), (1530, s1530), (3060, s3060), (6080, s6080))
    return {label: point(samples) for label, samples in buckets}
def distence(points, result):
    """Return (key, normalized score) of the entry in *result* most similar
    to the mean of *points*.

    Similarity to a member x is 1 / (1 + |mean(points) - x|); each key is
    scored by its best member, and the winning score is normalized by the
    sum over all keys.
    """
    center = np.mean(points)
    scores = {
        key: max(1.0 / (1.0 + np.linalg.norm(center - member))
                 for member in result[key])
        for key in result
    }
    best_key = max(scores, key=scores.get)
    return best_key, scores[best_key] / sum(scores.values())




# =============================================================================
# main
# =============================================================================
if __name__ == '__main__':
#    v_err,vin_nor = get_vin_index()
#    test_list = read_test(data_path,v_err,vin_nor)
    # NOTE(review): `testlist`, `v_err`, `vin_nor` and `temp_vin` are not
    # defined in this file (the lines that would build them are commented
    # out above) — this block only runs inside an interactive session where
    # they already exist; confirm before executing the script directly.
    result_df,p,r,hit_1,p1r0,p0r1,hit_1_all,hit_0_all,hit11,x10,err,nor,p11r00,s15,s1530,s3060,s6080,s80,hit_0 = final_test(testlist,v_err,vin_nor,lap=7,gap=7,pp=0.1,min_pre_gap=0,max_pre_gap=1000)
    # Pull the rows for one specific VIN out of each frame in temp_vin.
    sample = [x[x.vin=='LSJA24W68HS011988'] for x in  temp_vin]
