# -*- coding: utf-8 -*-
"""
Created on Mon Nov  6 09:48:06 2017

@author: xuanlei
"""
# Standard library
import re
from locale import *

# Third-party
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import pyplot
import numpy as np
import pandas as pd
from pandas.core import datetools
import seaborn as sns
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller

# Matplotlib configuration: render minus signs correctly and use a
# CJK-capable font so the Chinese labels display properly.
mpl.rcParams['axes.unicode_minus'] = False
mpl.rcParams['font.sans-serif'] = ['SimHei']


     
def test_set_unique(list_object):
    
    len_set = set([len(item) for item in list_object])
    
    if len(len_set) != 1:
        
        raise ValueError(" this set can't be transformed to a dataframe! ")
        
    else:
        
        pass


def extract_value(str_objective, pattern):
    """Return all non-overlapping matches of *pattern* in *str_objective*.

    Thin wrapper around :func:`re.findall`.  *pattern* is a regex string,
    e.g. ``r':\d+\.?\d{0,}'`` as used by ``regroup`` to pull ``:123.45``
    tokens out of the raw value strings.  (The module previously used
    ``re`` without importing it, raising ``NameError`` at call time.)
    """
    return re.findall(pattern, str_objective)



        
        
def regroup(data_path):
    """Read the balance CSV at *data_path* and split it per parent line.

    For each distinct value of the '平衡名称' (balance name) column, a
    DataFrame named ``parent_line_<i>`` is published into ``globals()``,
    holding the numeric sub-line input/output values parsed from the
    '起始输入原始值'/'起始输出原始值' string columns, plus a ``date``
    column taken from '日期'.  Groups with missing or non-rectangular
    sub-line data are skipped with a console message.

    NOTE(review): publishing results as module globals and the hard stop
    at ``i == 92`` look like debugging leftovers; both behaviours are
    kept for compatibility with existing callers.
    """
    data = pd.read_csv(data_path, engine='python')
    # Matches tokens like ':123' or ':123.45' embedded in the raw strings
    # (raw string: '\d' in a plain literal is a deprecated escape).
    pattern = r':\d+\.?\d{0,}'
    generatrix = set(data.loc[:, '平衡名称'])
    i = 1
    for name in generatrix:
        temp_df = data.loc[data.loc[:, '平衡名称'] == name, :]

        try:
            sub_ivalue_list = [
                [float(tok.replace(':', '')) for tok in extract_value(s, pattern)]
                for s in temp_df.loc[:, '起始输入原始值']
            ]
            sub_ovalue_list = [
                [float(tok.replace(':', '')) for tok in extract_value(s, pattern)]
                for s in temp_df.loc[:, '起始输出原始值']
            ]
        except (TypeError, AttributeError, ValueError):
            # Non-string cells (e.g. NaN) or unparsable tokens: skip group.
            print('母线：{0}，其子线数据缺失！'.format(name))
            i += 1
            continue

        try:
            # Detect a change in sub-line composition mid-series.
            test_set_unique(sub_ivalue_list)
            test_set_unique(sub_ovalue_list)
        except ValueError:
            print('母线：{0}，其子线成分发生变化，数据错误！'.format(name))
            i += 1
            continue

        # One row per observation: input values followed by output values.
        rows = [ivals + ovals
                for ivals, ovals in zip(sub_ivalue_list, sub_ovalue_list)]
        # DataFrame.append was removed in pandas 2.x; build directly.
        frame = pd.DataFrame(rows)

        i_len = len(sub_ivalue_list[0])
        o_len = len(sub_ovalue_list[0])
        col_name = ['subline_input_' + str(k + 1) for k in range(i_len)]
        col_name += ['subline_output_' + str(k + 1) for k in range(o_len)]
        frame.columns = col_name
        frame.loc[:, 'date'] = temp_df.loc[:, '日期'].tolist()

        globals()['parent_line_' + str(i)] = frame

        if i == 92:  # NOTE(review): hard stop kept from the original code
            print(i)
            break

        i += 1

def get_adjust_data(df):
    """Compute the relative input/output balance of first differences.

    The last column of *df* (assumed to be the date column — confirm with
    callers) is dropped, the remaining columns are first-differenced, and
    the first half of the columns is treated as inputs, the second half
    as outputs.

    Returns
    -------
    numpy.ndarray
        ``(sum(inputs) - sum(outputs)) / sum(inputs)`` per row.
    """
    # First-difference every value column; the first row becomes NaN and
    # is dropped.  (A stray, incomplete `for i in range()` line here was
    # a syntax error in the original and has been removed.)
    df_diff = df.iloc[:, :-1].diff().dropna(axis=0)
    lines = df_diff.shape[1]
    input_df = df_diff.iloc[:, :lines // 2]
    output_df = df_diff.iloc[:, lines // 2:]
    input_sum = np.sum(input_df, axis=1).tolist()
    output_sum = np.sum(output_df, axis=1).tolist()
    # Relative imbalance; positive means inputs exceeded outputs.
    return np.subtract(input_sum, output_sum) / input_sum

def pplot(df, feature):
    """Plot the time trend of one value column of the raw balance frame.

    Re-indexes *df* by its '日期' (date) column (note: mutates the
    caller's frame index), strips the non-numeric descriptor columns,
    casts the rest to float and draws *feature* as a green line with
    Chinese axis labels.
    """
    df.index = df.日期
    descriptors = ['日期', '平衡名称', '起始输入原始值', '起始输出原始值']
    numeric = df.drop(descriptors, axis=1).astype('float')
    numeric.loc[:, feature].plot(figsize=(10, 20), subplots=1,
                                 label=feature, color='g')
    plt.legend(loc='best')
    plt.xlabel('时间区间')
    plt.ylabel('对应值域')


#def arma


def cov(df):
    """Return the (covariance, correlation) matrix pair of the value columns.

    Re-indexes *df* by its '日期' column, drops the descriptor columns
    ('日期', '平衡名称', the raw value strings and 'con'), casts the
    remainder to float, and computes both matrices.
    """
    df.index = df.日期
    meta_cols = ['日期', '平衡名称', '起始输入原始值', '起始输出原始值', 'con']
    values = df.drop(meta_cols, axis=1).astype('float')
    return values.cov(), values.corr()


def splom_viz(df, labels=None):
    """Draw a seaborn pair plot (scatter-plot matrix) of *df*.

    Parameters
    ----------
    df : DataFrame of numeric columns to cross-plot.
    labels : optional column name used to colour the points (``hue``).
    """
    # Switch to a CJK-capable font so Chinese column names render.
    mpl.rcParams['font.sans-serif'] = ['Microsoft YaHei']
    sns.pairplot(df, hue=labels, diag_kind='kde', size=3)
    plt.show()

def ts(dfx):
    """Plot rolling statistics of *dfx* and run an augmented Dickey-Fuller test.

    Draws the original series with its 10-period rolling mean and rolling
    standard deviation, then prints the ADF test statistic, p-value, lag
    count, observation count and the critical values.
    """
    # pd.rolling_mean / pd.rolling_std were removed from pandas; use the
    # Series.rolling accessor instead (same window semantics).
    rolmean = dfx.rolling(window=10).mean()
    rolstd = dfx.rolling(window=10).std()

    fig = plt.figure()
    fig.add_subplot()
    plt.plot(dfx, color='blue', label='Original')
    plt.plot(rolmean, color='red', label='rolling mean')
    plt.plot(rolstd, color='black', label='Rolling standard deviation')

    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    # Dickey-Fuller test: the leading entries of the result tuple are the
    # test statistic, p-value, used lag count and number of observations;
    # the fifth element maps confidence levels to critical values.
    print('Results of Dickey-Fuller Test:')
    dftest = adfuller(dfx, autolag='AIC')
    dfoutput = pd.Series(
        dftest[0:4],
        index=['Test Statistic', 'p-value', '#Lags Used',
               'Number of Observations Used'])
    for key, value in dftest[4].items():
        dfoutput['Critical value (%s)' % key] = value

    print(dfoutput)

def decompose(df):
    """Decompose the series into trend, seasonal and residual components.

    Uses statsmodels' ``seasonal_decompose`` (previously called here
    without being imported, raising ``NameError``) and plots the original
    series together with the three components in a 4-row figure.

    Returns
    -------
    (trend, seasonal, residual)
        The three component series from the decomposition.
    """
    decomposition = seasonal_decompose(df)

    trend = decomposition.trend
    seasonal = decomposition.seasonal
    residual = decomposition.resid

    # Stack the original series and its three components in one figure.
    plt.subplot(411)
    plt.plot(df, label='Original')
    plt.legend(loc='best')
    plt.subplot(412)
    plt.plot(trend, label='Trend')
    plt.legend(loc='best')
    plt.subplot(413)
    plt.plot(seasonal, label='Seasonality')
    plt.legend(loc='best')
    plt.subplot(414)
    plt.plot(residual, label='Residuals')
    plt.legend(loc='best')
    plt.tight_layout()

    return trend, seasonal, residual

def test_stationarity(df):
    """Run stationarity diagnostics on every value column of *df*.

    For each column: plot rolling statistics and run the ADF test via
    ``ts``, then repeat on the log series minus its 12-period rolling
    mean ("manual differencing") and on the first difference of the log
    series ("automatic differencing").
    """
    df.index = df.con
    df = df.drop(['日期', '平衡名称', '起始输入原始值', '起始输出原始值', 'con'], axis=1)
    df = df.astype('float')
    for item in df.columns:
        try:
            dfx = df[item]
            print('.......>>>>> %s <<<<<<.....' % item)
            ts(dfx)
        except Exception:
            # Column could not be analysed; skip it.  (The original bare
            # except fell through and ran the differencing steps below
            # with a stale `dfx` from the previous iteration.)
            print('>>>>> %s <<<<<<' % item)
            continue
        print('############手动差分##############')
        ts_log = np.log(dfx)
        # pd.rolling_mean was removed from pandas; use .rolling().mean().
        moving_avg = ts_log.rolling(12).mean()
        plt.plot(ts_log, color='blue')
        plt.plot(moving_avg, color='red')

        ts_log_moving_avg_diff = ts_log - moving_avg
        ts_log_moving_avg_diff.dropna(inplace=True)

        ts(ts_log_moving_avg_diff)
        print('#############自动差分#############')
        ts_log_diff = ts_log - ts_log.shift()
        ts_log_diff.dropna(inplace=True)
        ts(ts_log_diff)

def rediff(df):
    """Plot the value columns of *df* against the 'con' index column."""
    df.index = df.con
    meta = ['日期', '平衡名称', '起始输入原始值', '起始输出原始值', 'con']
    values = df.drop(meta, axis=1).astype('float')
    pyplot.plot(values)
    pyplot.show()

def draw_acf_pacf(df, lags=3):
    """Plot the autocorrelation and partial autocorrelation of *df*.

    Parameters
    ----------
    df : raw balance frame; descriptor columns are stripped and the rest
        cast to float before plotting.
    lags : number of lags to display.  (Previously this argument was
        silently ignored — the calls hard-coded ``lags=3``.)
    """
    df.index = df.con
    df = df.drop(['日期', '平衡名称', '起始输入原始值', '起始输出原始值', 'con'], axis=1)
    df = df.astype('float')
    f = plt.figure(facecolor='white')
    ax1 = f.add_subplot(211)
    plot_acf(df, lags=lags, ax=ax1)
    ax2 = f.add_subplot(212)
    plot_pacf(df, lags=lags, ax=ax2)
    plt.show()
    

    


def cluster(df):
    """Cluster the numeric rows of *df* with DBSCAN and attach the labels.

    NOTE(review): ``StandardScaler`` and ``DBSCAN`` (scikit-learn) are not
    imported anywhere in this file, so this function raises ``NameError``
    as written — add ``from sklearn.preprocessing import StandardScaler``
    and ``from sklearn.cluster import DBSCAN`` to fix.

    Returns
    -------
    (df, unique_labels)
        The frame re-joined with its descriptor columns, indexed by the
        '日期' values and carrying a 'labels' column, plus the list of
        distinct cluster labels (-1 marks DBSCAN noise points).
    """
    df.index = df.con
    # Keep the descriptor columns aside so they can be re-attached after
    # clustering on the numeric columns only.
    date = df.loc[:,'日期']
    left = df.loc[:,['平衡名称','起始输入原始值', '起始输出原始值']]
    df = df.drop(['日期','平衡名称','起始输入原始值', '起始输出原始值','con'],axis=1)
    df = df.astype('float')
    
    # Standardise the value columns before clustering.
    dfs = StandardScaler().fit_transform(df)  
      
    # Run DBSCAN on the standardised values.
    db = DBSCAN(eps=0.8,min_samples=5).fit(dfs)  
#    core_samples_mask = np.zeros_like(db.labels_,dtype=bool)  
#    core_samples_mask[db.core_sample_indices_] = True  
    labels = db.labels_  
    df['labels'] = labels
    
    df = df.join(left)
    df.index = date
#    n_clusters_ = len(set(labels))-(1 if -1 in labels else 0)  
      
    unique_labels = list(set(labels))
    return df,unique_labels




if __name__ == '__main__':
    # Script entry point: split the raw balance CSV into per-parent-line
    # DataFrames (published as module globals by `regroup`).
    regroup('code/data_ori.csv')





