import os
import pickle
import time

import pandas as pd
import datetime as dt
import math
import sys
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from tsfresh import extract_features
from tsfresh.utilities.dataframe_functions import roll_time_series
from tsfresh.utilities.dataframe_functions import impute
import functools
from sklearn.model_selection import train_test_split

# Short aliases for the os.path helpers used throughout this module.
join = os.path.join
dirname = os.path.dirname

# Absolute directory containing this source file.
CURRENT_PATH = dirname(os.path.realpath(__file__))

# Make sibling modules in this directory (e.g. utils_exog) importable
# regardless of the current working directory.
sys.path.append(CURRENT_PATH)
from utils_exog import *

# Utility functions

def time_log(text):
    """Decorator factory that logs wall-clock timing around each call.

    Prints a timestamped "Starting!" line before the wrapped function runs,
    a timestamped "Finish!" line after it returns, and the elapsed seconds.

    Parameters
    ----------
    text : str
        Label shown in the start/finish messages.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            started = time.time()
            stamp = time.strftime(u"%Y-%m-%d %X", time.localtime(started))
            print(' %s: %s Starting!' % (stamp, text))
            result = func(*args, **kw)
            finished = time.time()
            stamp = time.strftime(u"%Y-%m-%d %X", time.localtime(finished))
            print(' %s: %s Finish!\n' % (stamp, text))
            print(' 耗时: %s 秒!\n' % (finished - started))
            return result
        return wrapper
    return decorator


def gen_X_y_data(X_Data, y_Data, predcit_asccode , config_token):
    """Build the supervised (X, y) training table for each asc_code.

    For every code, the target ``val_y`` is ``val`` shifted backwards by
    ``config_token['model_version']`` periods (the value that occurs that many
    steps in the future), joined onto the feature rows on (asc_code, date).
    Tail rows whose shifted target is NaN are dropped.

    Parameters
    ----------
    X_Data : DataFrame with ['asc_code', 'date'] plus feature columns.
    y_Data : DataFrame with ['asc_code', 'date', 'val'].
    predcit_asccode : iterable of asc_code values to process.
    config_token : dict; 'model_version' is the forecast horizon in periods.

    Returns
    -------
    DataFrame of features merged with the non-null 'val_y' target
    (empty DataFrame when no codes are given).
    """
    horizon = config_token['model_version']
    parts = []
    for one_asccode in predcit_asccode:
        # .copy() so sorting / adding columns doesn't mutate a view of the
        # caller's frame (avoids SettingWithCopyWarning and silent aliasing).
        X_Data_one = X_Data.loc[X_Data['asc_code'] == one_asccode].copy()
        X_Data_one.sort_values(by=['date'], ascending=True, inplace=True)
        y_Data_one = y_Data.loc[y_Data['asc_code'] == one_asccode].copy()
        y_Data_one.sort_values(by=['date'], ascending=True, inplace=True)

        # Negative shift pairs each row with the value 'horizon' periods ahead.
        y_Data_one['val_y'] = y_Data_one['val'].shift(-horizon)
        y_Data_one = y_Data_one[['asc_code', 'date', 'val_y']]

        parts.append(X_Data_one.merge(y_Data_one, on=['asc_code', 'date'], how='inner'))

    # Guard: the original crashed (KeyError on 'val_y') for an empty code list.
    if not parts:
        return pd.DataFrame()

    # Single concat instead of concatenating inside the loop (quadratic copies).
    X_y_data_total = pd.concat(parts, axis=0)
    X_y_data_total = X_y_data_total.loc[~X_y_data_total['val_y'].isnull()]
    return X_y_data_total


def gen_proc_basedata(data_path):
    """Load the pivoted manual time-series workbook and reshape it to long form.

    Reads two sheets of data_ts_pivot_manual.xlsx: the pivoted data table and
    the prediction-token table listing the asc_code series to forecast.
    NOTE(review): both calls pass ``sheet_name=''`` — an empty sheet name looks
    wrong (possibly redacted); confirm the intended sheet names.

    Parameters
    ----------
    data_path : directory containing data_ts_pivot_manual.xlsx.

    Returns
    -------
    (data_pred, predcit_asccode):
        data_pred : long-format DataFrame ['asc_code', 'date', 'val'] limited
            to the codes to predict; 'date' parsed to datetime ('-01' is
            appended, so source headers are presumably 'YYYY-MM' — verify).
        predcit_asccode : list of asc_code values to predict.
    """

    data_total = pd.read_excel( join(data_path, r'data_ts_pivot_manual.xlsx') , sheet_name='')
    predict_token = pd.read_excel(join(data_path, r'data_ts_pivot_manual.xlsx')  , sheet_name='')

    # Basic processing: keep only the series to predict and melt the pivoted
    # (one column per period) table into long format.
    predcit_asccode = predict_token['asc_code'].values.tolist()
    data_pred = data_total.loc[data_total['asc_code'].isin(predcit_asccode)]
    data_pred = pd.melt(data_pred, 'asc_code')
    data_pred.columns = ['asc_code','date' , 'val']

    # Period headers become dates by appending the first day of the month.
    data_pred['date'] = data_pred['date'].apply(lambda x : dt.datetime.strptime(str(x)+'-01' , '%Y-%m-%d'))

    return data_pred , predcit_asccode

def get_model_performance1(y_data_test, y_data_test_pred):
    """Return (MAE, RMSE) for the given true and predicted values."""
    mae = mean_absolute_error(y_data_test, y_data_test_pred)
    mse = mean_squared_error(y_data_test, y_data_test_pred)
    return mae, math.sqrt(mse)


def get_model_performance2(info1, info2):
    """Summarise the absolute-percentage-error distribution.

    Parameters
    ----------
    info1 : iterable of true values.
    info2 : iterable of predicted values (same length as info1).

    Returns
    -------
    (share >= 15%, share >= 10%, share >= 5%, max percentage error),
    where percentage error is |true - pred| / true per row.
    """
    y_ana = pd.DataFrame({'y_true': list(info1), 'y_pred': list(info2)})
    y_ana['diff'] = (y_ana['y_true'] - y_ana['y_pred']).abs()
    y_ana['diff_percent'] = y_ana['diff'] / y_ana['y_true']

    n_rows = y_ana.shape[0]
    upper_15, upper_10, upper_5 = (
        (y_ana['diff_percent'] >= cut).sum() / n_rows
        for cut in (0.15, 0.1, 0.05)
    )
    return upper_15, upper_10, upper_5, y_ana['diff_percent'].max()


def gen_Y(data_pred):
    """Aggregate values per (asc_code, date) to form the target series.

    Sums all value columns within each (asc_code, date) group and returns
    the result with the group keys restored as ordinary columns.
    """
    grouped = data_pred.groupby(['asc_code', 'date']).sum()
    return grouped.reset_index(level=[0, 1])


def gen_train_test(X_y_data_total, config_token):
    """Split the feature/target table into train and test sets.

    If ``config_token['split_date']`` is a 'YYYY-MM-DD' string the split is
    chronological: train is strictly before the date, test is on/after it.
    If it is the literal string 'shuffle', a random 75/25 split is used.

    Parameters
    ----------
    X_y_data_total : DataFrame containing 'asc_code', 'date', 'val_y' plus
        feature columns.
    config_token : dict with key 'split_date'.

    Returns
    -------
    (X_y_data_train, X_y_data_test, X_data_train, y_data_train,
     X_data_test, y_data_test)
    """
    # BUG FIX: the column list was read from X_y_data_train before that
    # variable was ever assigned (NameError on every call); derive the
    # feature columns from the input frame instead.
    train_col = X_y_data_total.columns.to_list()
    for non_feature in ('asc_code', 'date', 'val_y'):
        train_col.remove(non_feature)

    if config_token['split_date'] != 'shuffle':
        # Parse once instead of twice.
        split_at = dt.datetime.strptime(config_token['split_date'], '%Y-%m-%d')
        X_y_data_train = X_y_data_total.loc[X_y_data_total['date'] < split_at]
        X_y_data_test = X_y_data_total.loc[X_y_data_total['date'] >= split_at]

        X_data_train = X_y_data_train[train_col]
        y_data_train = X_y_data_train[['val_y']]
        X_data_test = X_y_data_test[train_col]
        y_data_test = X_y_data_test[['val_y']]
    else:
        X_data_train, X_data_test, y_data_train, y_data_test = \
            train_test_split(X_y_data_total[train_col], X_y_data_total['val_y'], test_size=0.25)
        X_y_data_train = pd.concat([X_data_train, y_data_train], axis=1)
        X_y_data_test = pd.concat([X_data_test, y_data_test], axis=1)

    return X_y_data_train, X_y_data_test, X_data_train, y_data_train, X_data_test, y_data_test


def rolling_extract_genX(data_pred , config_token):
    """Build the tsfresh feature matrix from rolled time-series windows.

    Rolls each asc_code series into overlapping windows, extracts the
    configured tsfresh features per window, then joins in the per-month
    calendar features (for months 2, 7 and 12) produced by the utils_exog
    helpers ``month_distance`` and ``if_month``.

    Parameters
    ----------
    data_pred : long-format DataFrame ['asc_code', 'date', 'val'].
    config_token : dict with 'max_timeshift', 'min_timeshift' and
        'tsfresh_feature' (a tsfresh ``default_fc_parameters`` mapping).

    Returns
    -------
    DataFrame keyed by ['asc_code', 'date'] with tsfresh + calendar features.
    """
    df_rolled = roll_time_series(data_pred, column_sort="date", column_id='asc_code',
                                 max_timeshift=config_token['max_timeshift'],
                                 min_timeshift=config_token['min_timeshift'], n_jobs=0)

    X_Data = extract_features(df_rolled, column_id='id', column_sort='date',
                              column_value='val', impute_function=impute,
                              show_warnings=False,
                              n_jobs=0, default_fc_parameters=config_token['tsfresh_feature'])

    # The rolled index is (asc_code, window-end date); surface it as columns.
    X_Data.reset_index(level=[0, 1], inplace=True)
    X_Data.rename(columns={'level_0': 'asc_code', 'level_1': 'date'}, inplace=True)

    # BUG FIX: the month features were merged into an initially-empty
    # DataFrame, which raises KeyError on the join keys during the first
    # iteration.  Seed the accumulator with the first month's frame instead.
    month_feature = None
    for month in [2, 7, 12]:
        one_month_feature1 = month_distance(df_rolled, month)
        one_month_feature2 = if_month(df_rolled, month)
        one_month_feature_all = one_month_feature1.merge(one_month_feature2, left_index=True, right_index=True)
        one_month_feature_all = one_month_feature_all.reset_index(level=[0, 1])
        one_month_feature_all = one_month_feature_all.rename(columns={'id_': 'asc_code', 'id_time': 'date'})
        if month_feature is None:
            month_feature = one_month_feature_all
        else:
            month_feature = month_feature.merge(one_month_feature_all, on=['asc_code', 'date'])
    X_Data = X_Data.merge(month_feature, on=['asc_code', 'date'])

    return X_Data
