# -*- coding: utf-8 -*-
"""
Created on Sat Dec 26 15:30:10 2020
@author: 59567
"""
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm

sys.path.append('..')
from itertools import product
from numpy import mean
from tjdutils.utils import real_path, load_pickle, save_pickle, check_path, current_time
from tjdutils.utils import get_file_list, exists
from tjd_slicer.tjd_slicer import Slicer_for_time

from copy import deepcopy
from multiprocessing import Pool

try:
    from collector.metrics import Metrics
    from collector.collector_utils import add_cycle_key
    from collector.collector_utils import change_targets_by_upper_and_lower
except:
    print('本文件夹import')
    from metrics import Metrics
    from collector_utils import add_cycle_key
    from collector_utils import change_targets_by_upper_and_lower


# Run the collector over every pkl file in a folder, in parallel.
def multi_get_summary(tvt, pt, all_keys, trainer_addresses, n_workers=6):
    """Summarize all trainer pkl files and concatenate the results.

    Parameters
    ----------
    tvt : list[str]
        Split names, e.g. ['train', 'valid', 'test'].
    pt : list[str]
        Value kinds, e.g. ['predictions', 'targets'].
    all_keys : list[str]
        Union of selector/trainer argument keys (summary columns).
    trainer_addresses : list[str]
        Paths of the trainer pkl files, one worker task per path.
    n_workers : int, optional
        Process-pool size; defaults to 6 (the previous hard-coded value).

    Returns
    -------
    (summary_pd, results)
        The concatenated summary DataFrame (re-indexed 1..N) and the raw
        AsyncResult list.
    """
    results = []
    # Context manager guarantees the pool is cleaned up even if a task raises.
    with Pool(n_workers) as collector_pool:
        for i_path, path in enumerate(trainer_addresses):
            results.append(
                collector_pool.apply_async(
                    basic_get_summary,
                    (i_path, path, tvt, pt, all_keys)))
        collector_pool.close()
        collector_pool.join()
    concater = [result.get() for result in results]
    summary_pd = pd.concat(concater, axis=0)
    summary_pd.index = list(range(1, summary_pd.shape[0] + 1))
    return summary_pd, results


# Run the collector over a single pkl file in the folder.
def basic_get_summary(i_path, path, tvt, pt, all_keys):
    """Build one summary DataFrame from the pkl file at *path*.

    Each entry in the pickle is one training run; its 'trainer_key' (a
    dict-shaped string) becomes the row index.  Rows are filled by the
    feature selector, the metrics calculator and the time calculator in
    turn, then assembled into a DataFrame with a duplicate 'key' column.
    """
    print('i', i_path, path)
    values = load_pickle(path)
    summary_rows = {}  # trainer_key -> finished summary row (dict)
    progress_label = 'run collector with batch>>>' + str(i_path)
    for idx in tqdm(range(len(values)), desc=progress_label):
        record = values[idx]                # dict with one run's data
        trainer_key = record['trainer_key'] # dict-shaped string identifying the run
        row = multi_feature_selector(all_keys, trainer_key, record, {})
        row = multi_cal_metrics(tvt, pt, record, row)
        summary_rows[trainer_key] = multi_cal_times(row)
    # Transpose so rows are runs and columns are summary fields.
    summary_pd = pd.DataFrame(summary_rows).T
    # Mirror the index into an explicit 'key' column for downstream grouping.
    summary_pd['key'] = summary_pd.index
    return summary_pd


# Parse the dict-shaped string *key* into a real dict, then fill col_dict with
# every name in all_keys ('-' for names absent from the key) plus the
# truncated feature list.
def multi_feature_selector(all_keys, key, value, col_dict):
    """Populate *col_dict* from the run's key string and feature list.

    Parameters
    ----------
    all_keys : list[str]   columns to fill in the summary row.
    key : str              concatenated dict literals, e.g. "{'a': 1}{'b': 2}".
    value : dict           run data; must carry 'features' and 'n_features'.
    col_dict : dict        summary row, mutated in place and returned.
    """
    import ast  # local import keeps the top-of-file import block untouched

    # Joining the "}{"-separated fragments yields one dict literal.
    # literal_eval only accepts Python literals, so a corrupt or malicious
    # key string cannot execute code (the previous version used eval()).
    exist_items = ast.literal_eval(key.replace('}{', ','))
    for k in all_keys:
        # O(1) dict lookup; keys missing from the run are marked with '-'.
        col_dict[k] = exist_items.get(k, '-')
    # Keep only the first n_features selected feature names.
    col_dict['features'] = value['features'][:int(value['n_features'])]
    return col_dict


# Add tvt/pt prediction-and-target columns and per-split metric scores to col_dict.
def multi_cal_metrics(tvt, pt, value, col_dict):
    """Copy the run's predictions/targets into the summary row and compute
    Metrics for every split.

    Parameters
    ----------
    tvt : list[str]   split names (e.g. ['train', 'valid', 'test']).
    pt : list[str]    value kinds (e.g. ['predictions', 'targets']).
    value : dict      one run's raw data (arrays, losses, model path).
    col_dict : dict   the summary row; mutated in place and returned.
    """
    output = col_dict
    correct = True  # NOTE(review): always True — looks like a leftover debug toggle
    if correct:
        output, value = change_targets_by_upper_and_lower(output, value)
    # NOTE(review): the loop variable 'pt' shadows the parameter of the same
    # name; harmless only because product(tvt, pt) is evaluated once up front.
    for t_v_t, pt in product(tvt, pt):
        s = 'y_' + t_v_t + '_' + pt
        output[s] = value[s]
    m = output  # NOTE(review): unused — 'm' is rebound in the metrics loop below
    output['test_0y'] = value['y_test_targets'][0, 0]
    output['test_0y_p'] = value['y_test_predictions'][0, 0]
    output['model_path'] = value['model_path']
    # Prepend one valid-targets row to each test array.
    # NOTE(review): the index [-len(...), :] selects a SINGLE row; if the
    # intent was "the last len(...) rows" it should be the slice
    # [-len(...):, :] — confirm against the producer of these arrays.
    for key in ['y_test_predictions', 'y_test_targets']:
        output[key] = np.vstack((value['y_valid_targets'][-len(output['y_test_targets']), :], value[key]))
    for name in ['train_loss', 'valid_loss']:
        output[name] = value[name]
    y_train_targets = output['y_train_targets']
    y_valid_targets = output['y_valid_targets']
    for t_v_t in tvt:
        m_r = Metrics(output['y_' + t_v_t + '_targets'], output['y_' + t_v_t + '_predictions'],
                      y_train_targets, y_valid_targets)
        for m, v in m_r.metrics_result.items():
            output[t_v_t + '_' + m] = v  # one column per split/metric pair, e.g. 'test_mae'
    return output


# Add train/valid/test time-slice columns to the summary row.
def multi_cal_times(output):
    """Attach 'train_times', 'valid_times' and 'test_times' to *output*.

    The slices come from a Slicer_for_time built from the row's own
    freq / end_datetime / periods / x_time_step / y_time_step values with
    a fixed 20% validation share.  *output* is mutated and returned.
    """
    slicer = Slicer_for_time(
        freq=output['freq'],
        end_datetime=output['end_datetime'],
        periods=int(output['periods']),
        x_time_step=output['x_time_step'],
        y_time_step=output['y_time_step'],
        valid_pct=0.2,
    )
    for split in ('train', 'valid', 'test'):
        output[split + '_times'] = getattr(slicer, split + '_times')
    return output


# Prepare data for the charter step from the summary DataFrame.
def rolling_result(tvt, all_keys, summary_pd, y_name, s_adds_dir, selector_args):
    """Group the summary by latent keys, save each group and each
    per-metric "best row" series, then save the combined collector pickle.

    Returns the path of the saved collector pickle.
    """
    basic_metrics = ['mae', 'mse', 'acc', 'mae_y', '1_mae_y', 'vbp']
    metrics = [t_v_t + '_' + bs for t_v_t in tvt for bs in basic_metrics]
    l_keys = latent_keys(all_keys)
    rolling = {}
    gb = summary_pd.groupby(l_keys)  # iterable of (group name, group DataFrame)
    # Save each group to its own pkl and record (name, path) in rolling[name].
    for name, data in gb:
        print('name:', name, 'data_shape', data.shape, 'shapes 35 ok')
        path_name = y_name.replace(':', '_') + '_' + current_time() + '.pkl'
        # Build the output directory suffix from s_adds_dir: drop everything
        # before the first underscore, keep the timestamp-like remainder.
        time_s_list = s_adds_dir.split("_")
        del time_s_list[0]
        time_s = "_".join(time_s_list)
        dir_path = check_path('../../output/collector/collector' + time_s)
        dir_path = dir_path + '/'
        dir_path = dir_path.replace(':', '_')
        exists(dir_path)
        save_pickle(data, dir_path + path_name, long_str=True)
        rolling[name] = (name, dir_path + path_name)

    # NOTE(review): dir_path is only assigned inside the loop above — if the
    # groupby is empty this raises UnboundLocalError; confirm an empty
    # summary cannot reach this point.
    for key in metrics:
        if 'test' not in key:
            path_name = y_name.replace(':', '_') + '_' + key + '.pkl'
            save_pickle(rolling_by(summary_pd, key), dir_path + path_name, long_str=True)
            rolling[key] = (key, dir_path + path_name)
    rolling_m, rolling_f = rolling_metrics(rolling, selector_args, metrics)
    result_dict = {'df': rolling_m, 'dict': rolling, 'forecast': rolling_f,
                   'cycle_key': l_keys, 'y_name': y_name}
    result_dict = add_cycle_key(result_dict)
    collector_path = save_pickle(result_dict, 'collector')
    # NOTE(review): this path uses '../output' while the saves above use
    # '../../output' — verify both resolve to the intended locations.
    rolling_m.to_excel(check_path('../output/rolling_acc/metric' + y_name.replace(':', '_') + current_time() + '.xlsx'))
    return collector_path


# Drop the three per-window keys; the remainder identify a project group.
def latent_keys(all_keys):
    """Return *all_keys* without 'end_datetime', 'freq' and 'test_len'.

    Order is preserved and the input list is left untouched.  Unlike the
    previous remove()-based version this neither shadows the function name
    nor raises ValueError when one of the three keys is absent.
    """
    excluded = {'end_datetime', 'freq', 'test_len'}
    return [k for k in all_keys if k not in excluded]


def rolling_by(summary_pd, key):
    """For each distinct end_datetime keep the single best row by *key*.

    'Best' means the largest value for accuracy-like metrics (*key*
    contains 'acc') and the smallest value otherwise.  The chosen rows are
    returned concatenated in chronological order.
    """
    # Accuracy metrics are maximized; everything else (errors) minimized.
    ascending = 'acc' not in key
    best_rows = []
    for date in sorted(set(summary_pd['end_datetime'])):
        same_date = summary_pd.loc[summary_pd['end_datetime'] == date, :]
        ranked = same_date.sort_values(by=key, ascending=ascending)
        # iloc[[0], :] keeps a one-row DataFrame (not a Series).
        best_rows.append(ranked.iloc[[0], :])
    return pd.concat(best_rows, axis=0)


def rolling_metrics(rolling, selector_args, metrics):
    """Compute mean metrics and collect per-date forecasts for every
    saved rolling entry.

    Parameters
    ----------
    rolling : dict          name -> (name, pkl path) of saved group frames.
    selector_args : list    selector argument dicts (supply dates and y_name).
    metrics : list[str]     metric column names to average.

    Returns
    -------
    (metrics DataFrame indexed by entry name,
     forecast DataFrame whose first row is the real series 'y_real').
    """
    rolling_m = []
    # Ground-truth series loaded from the shared Excel workbook.
    y = pd.read_excel(real_path('y集合.xlsx'), index_col=0)
    y.index = [str(t)[:10] for t in y.index]  # normalize index to 'YYYY-MM-DD'
    end_datetimes = list(set([selector_args[i]['end_datetime'] for i in range(len(selector_args))]))
    end_datetimes.sort()
    y_name = selector_args[0]['y_name']
    rolling_targets = {t: y.loc[t, y_name] for t in end_datetimes}
    rolling_forecast_li = [pd.DataFrame(rolling_targets, index=['y_real'])]
    length = len(rolling)
    for i, (k, v_adress) in enumerate(rolling.items()):
        print('Calculate rolling metrics i %d in %d' % (i, length))
        v_df = load_pickle(v_adress[1])  # v_adress is (name, pkl path)
        temp_dict, rolling_forecast = {}, {}
        v_df.index = v_df['end_datetime']
        temp_dict['rolling_acc'] = mean(v_df['test_acc'])
        temp_dict['triangle_area'] = 0.5 * mean(v_df['test_mae'])
        for metric in metrics:
            temp_dict[metric] = mean(v_df[metric])
        for t in end_datetimes:
            # assumes each 'test_acc' cell is indexable (array-like) — TODO confirm
            temp_dict[t] = int(v_df.loc[t, 'test_acc'][0])
            rolling_forecast[t] = v_df.loc[t, 'test_0y_p']
        for c in [12, 18, 24]:
            if len(list(v_df['end_datetime'])) > c:
                # NOTE(review): acc_li spans ALL end_datetimes, so every
                # 'recent_c' column gets the same overall mean rather than
                # the mean of the most recent c dates — confirm intent.
                acc_li = [v_df.loc[end_datetime, 'test_acc'][0] for end_datetime in end_datetimes]
                temp_dict['recent_' + str(c)] = float(mean(acc_li))
        rolling_m.append(pd.DataFrame(temp_dict, index=[k]))
        rolling_forecast_li.append(pd.DataFrame(rolling_forecast, index=[k]))
    return pd.concat(rolling_m, axis=0), pd.concat(rolling_forecast_li, axis=0)


def run_collector(s_path, t_path, s_adds, t_adds, s_adds_dir):
    """End-to-end collector: load the argument spaces, summarize every
    trainer pkl in parallel, then roll the results up into the final
    collector pickle.

    Returns (collector pickle path, list of AsyncResult objects).
    """
    selector_args = load_pickle(s_path)
    trainer_args = load_pickle(t_path)
    tvt = ['train', 'valid', 'test']
    pt = ['predictions', 'targets']
    # Seed the column list with the first selector/trainer key sets, then
    # append (in first-seen order) any key a later selector dict introduces.
    all_keys = list(selector_args[0].keys()) + list(trainer_args[0].keys())
    for args in selector_args[1:]:
        for key in args:
            if key not in all_keys:
                all_keys.append(key)
    y_name = selector_args[0]['y_name']
    summary_pd, results = multi_get_summary(tvt, pt, all_keys, t_adds)
    collector_path = rolling_result(tvt, all_keys, summary_pd, y_name,
                                    s_adds_dir, selector_args)
    return collector_path, results


if __name__ == "__main__":
    # Hard-coded run: argument-space pickles and result directories from a
    # specific 2021-02-19 experiment.
    s_path = '../../output/space/selector_subspace_2021_02_19_15_40_39_403207.pkl'
    t_path = '../../output/space/trainer_subspace_2021_02_19_15_40_39_403207.pkl'

    s_adds_dir = '../../output/selector/selector_2021_02_19_15_40_39_403207'
    t_adds_dir = '../../output/trainer/trainer_2021_02_19_15_40_39_403207'

    # Gather every file under each result directory.
    s_adds = get_file_list(s_adds_dir, [])
    t_adds = get_file_list(t_adds_dir, [])

    path, result = run_collector(s_path, t_path, s_adds, t_adds, s_adds_dir)
