# Bootstrap: silence warnings and TensorFlow logging, and extend sys.path
# BEFORE the heavy imports below — order here is deliberate.
import sys
import os
import warnings
warnings.filterwarnings('ignore')
# Project root is two directory levels above this file; appending it lets
# the `sample.*` and `bagel` imports below resolve when run as a script.
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root_path)
# os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
# 3 = show TensorFlow C++ errors only; must be set before TF is imported.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from multiprocessing import Pool
import bagel
from glob import glob
from sample.config.global_data import change_list, test_data_path, train_data_path, project_path, epochs
from sample.plot.plot_res import plot_res_paper
import matplotlib.pyplot as plt
from sample.extra_process.extra_process import spot, filter_by_median_diff



def main(train_file, test_file, change):
    """Analyze one KPI curve: load its trained Bagel model, score the test
    data, threshold the scores with SPOT, and save the analysis figure.

    Args:
        train_file: path to the training CSV; the saved model is located by
            substituting 'kpi_data' -> 'param' in this path.
        test_file: path to the matching test CSV for the same KPI.
        change: change descriptor dict; change['name'] names the output dir.

    Returns:
        None. Side effects: writes a PNG under out/analyze/<change name>/.
    """
    print(f'KPI: {test_file}')
    # KPI name: the file name without its .csv extension.
    item_name = test_file.split(os.sep)[-1].split('.csv')[0]
    train_kpi = bagel.utils.load_kpi(train_file)
    test_kpi = bagel.utils.load_kpi(test_file)
    # Median-difference filter runs BEFORE standardization; curves whose
    # median-diff coefficient is too small are skipped entirely.
    if_analyze, median_diff_list = filter_by_median_diff(item_name, test_kpi)
    if not if_analyze:
        print(f'{item_name}的测试数据中位差系数过小')
        return
    # Standardize test data with the TRAINING mean/std so both share a scale.
    train_kpi, mean, std = train_kpi.standardize()
    test_kpi, _, _ = test_kpi.standardize(mean=mean, std=std)

    model = bagel.Bagel()
    model.load(train_file.replace('kpi_data', 'param'))
    # Predict anomaly scores and reconstruction expectation bands.
    anomaly_scores, x_expectation, low_expectation, high_expectation = model.predict(
        test_kpi)
    # De-standardize the expectations back to the original value scale.
    x_expectation = (x_expectation * std) + mean
    low_expectation = (low_expectation * std) + mean
    high_expectation = (high_expectation * std) + mean
    # Scores on the tail of the training data seed SPOT's initial threshold.
    train_anomaly_scores, _, _, _ = model.predict(
        train_kpi.slice_by_index(-1440))
    # SPOT turns raw anomaly scores into per-point thresholds and labels.
    thresholds, eval_label = spot(kpi=test_kpi, spot_data=anomaly_scores, init_data=train_anomaly_scores, item_name=item_name,
                                  low_expectation=low_expectation, high_expectation=high_expectation, median_diff=median_diff_list)
    # Bundle everything the plotting helper needs for the analysis figure.
    bagel_data = {
        'anomaly_scores': anomaly_scores,
        'thresholds': thresholds,
        'median_diff_list': median_diff_list,
        'eval_label': eval_label,
        'low_expectation': low_expectation,
        'high_expectation': high_expectation,
        'x_expectation': x_expectation
    }
    fig = plot_res_paper(test_kpi, change, bagel_data=bagel_data, test_file=test_file)
    test_file_segment = test_file.split(os.sep)
    fig_path = os.path.join(project_path, 'out', 'analyze', change['name'])
    os.makedirs(fig_path, exist_ok=True)
    fig_name = f'{test_file_segment[-2]}_{test_file_segment[-1]}.png'
    fig.tight_layout(pad=2, h_pad=1.5, w_pad=None)
    # Fix: the keyword was misspelled 'xxbox_inches'; savefig expects
    # 'bbox_inches' to crop the figure to a tight bounding box.
    fig.savefig(os.path.join(fig_path, fig_name), bbox_inches='tight')
    plt.close(fig=fig)


def train(train_file):
    """Fit a Bagel model on one training KPI and persist its parameters.

    The saved parameter path mirrors the input path with 'kpi_data'
    replaced by 'param'.
    """
    print(f'train KPI: {train_file}')
    kpi = bagel.utils.load_kpi(train_file)
    kpi.complete_timestamp()
    kpi, _mean, _std = kpi.standardize()
    model = bagel.Bagel()
    model.fit(
        kpi=kpi.use_labels(0.),
        validation_kpi=None,
        epochs=epochs,
        verbose=1,
    )
    model.save(train_file.replace('kpi_data', 'param'))


if __name__ == '__main__':

    # 1. Walk every change in the configured change list.
    for change in change_list:
        # 2. All test CSVs (experiment + control groups) for this change.
        test_file_list = glob(
            os.path.join(test_data_path, change['name'], '219', '**', '*.csv'),
            recursive=True)
        # 3. All candidate training CSVs across every group.
        train_file_list = glob(
            os.path.join(train_data_path, '**', '219', '**', '*.csv'),
            recursive=True)
        # Map each training path to its would-be test path by swapping
        # train/0303_6days for test/<change name>.
        train_as_test = [
            p.replace(os.path.join('train', '0303_6days'),
                      os.path.join('test', change['name']))
            for p in train_file_list
        ]
        # 4a. Keep only training files whose counterpart exists in the test set.
        filter_train_file_list = [
            original for original, mapped in zip(train_file_list, train_as_test)
            if mapped in test_file_list
        ]
        # 4b. Symmetrically, keep only test files whose mapped training
        # counterpart survived the filter above.
        test_as_train = [
            p.replace(os.path.join('test', change['name']),
                      os.path.join('train', '0303_6days'))
            for p in test_file_list
        ]
        filter_test_file_list = [
            original for original, mapped in zip(test_file_list, test_as_train)
            if mapped in filter_train_file_list
        ]
        # 5. Pair training and test files; main() handles one curve at a time.
        if len(filter_test_file_list) != len(filter_train_file_list):
            print(f'测试数据{len(filter_test_file_list)}', f'训练数据{len(filter_train_file_list)}', '训练数据指标数据缺失')
            continue
        pool_param = [
            [tr_file, te_file, change]
            for tr_file, te_file in zip(filter_train_file_list, filter_test_file_list)
        ]
        with Pool(processes=5) as pool:
            print('start train')
            pool.starmap(main, pool_param)
            pool.close()
            pool.join()
