'''
Select per-cluster anomaly-detection thresholds using the best F1 score.
'''
import numpy as np
import pickle
from sklearn.metrics import precision_recall_curve
from typing import Sequence, Tuple, Dict, Optional
from global_data_2 import *
import os
import pathlib

# Root of this experiment's output tree: <project>/ctf_code/<exp_key>.
root_path = os.path.join(project_path, f"ctf_code/{exp_key}")
# Destination for the per-cluster thresholds chosen by best F1 score
# (written via np.save at the bottom of this script).
threshold_path = pathlib.Path(os.path.join(project_path, f"ctf_code/{exp_key}/label_data/threshold.npy"))

def get_mean_res(res_list: Sequence[float], weight_list: Sequence[float]) -> float:
    """Return the weighted mean of *res_list* (weights *weight_list*), rounded to 4 decimals.

    NOTE: this function was previously defined twice verbatim; the redundant
    duplicate definition has been collapsed into this single one.
    """
    return round(np.dot(np.array(res_list), np.array(weight_list)) / np.sum(weight_list), 4)

def _best_f1score(labels: np.ndarray, scores: np.ndarray) -> Tuple[float, float, float, float]:
    """Pick the point on the precision-recall curve that maximizes F1.

    Args:
        labels: binary ground-truth labels (1 = anomaly).
        scores: anomaly scores; higher means more anomalous (the caller
            negates its raw scores before passing them in).

    Returns:
        Tuple of (best_threshold, best_precision, best_recall, best_f1).
    """
    try:
        # Pass positionally: the `probas_pred` keyword is deprecated in
        # newer scikit-learn releases (renamed to `y_score`).
        precision, recall, thresholds = precision_recall_curve(labels, scores)
    except Exception:
        # Dump diagnostics (a failure here usually means single-class labels
        # or NaN/inf scores), then re-raise preserving the original traceback.
        print(set(labels.tolist()))
        print(scores)
        raise
    # Clip the denominator so 0/0 (precision == recall == 0) yields 0, not NaN.
    f1score = 2 * precision * recall / np.clip(precision + recall, a_min=1e-8, a_max=2000)

    # precision_recall_curve appends a final (precision=1, recall=0) point
    # with no matching threshold; its F1 is 0, so argmax only returns it when
    # every F1 is 0, in which case argmax picks index 0 — still in range.
    best_idx = np.argmax(f1score)
    return thresholds[best_idx], precision[best_idx], recall[best_idx], f1score[best_idx]


# Load the cluster assignment: a list where each element is the list of
# machine ids (flow ids) belonging to that cluster.
with open(os.path.join(root_path, 'z_results/z_cluster.pkl'), mode='rb') as f:
    cluster_list = pickle.load(f)

# Per-cluster result accumulators (parallel lists, indexed by cluster).
best_threshold_list = []
best_precision_list = []
best_recall_list = []
f1score_list = []
# Number of score points per cluster; used as the weight in the means below.
length_list = []
for cluster_index, machine_list in enumerate(cluster_list):
    # Concatenated anomaly scores and labels across this cluster's machines.
    score_list = []
    label_list = []
    # Load the per-cluster score matrix ONCE (it depends only on the cluster),
    # instead of re-opening and re-unpickling the same file for every machine.
    with open(os.path.join(root_path, f'label_data/{cluster_index + 1}_score.pkl'), mode='rb') as f:
        cluster_scores = pickle.load(f)[0]
    # machine_id doubles as the flow_id used to locate the label file.
    for machine_index, machine_id in enumerate(machine_list):
        # machine_index selects this machine's column in the score matrix.
        score_list.extend(cluster_scores[:, machine_index])
        # Skip the first (omni_windows_size - 1) labels: no score exists until
        # a full detection window is available.
        with open(os.path.join(project_path, f'label_result/{int(machine_id)}.pkl'), mode='rb') as l:
            label_list.extend(pickle.load(l)[omni_windows_size - 1:])
        # Sanity check: scores and labels must stay aligned machine by machine.
        if len(score_list) != len(label_list):
            raise ValueError(
                f"cluster_index:{cluster_index} len(score_list):{len(score_list)}, len(label_list):{len(label_list)}")
    # Scores are negated so that higher values mean more anomalous.
    res_tuple = _best_f1score(np.array(label_list).astype(int), -np.array(score_list).astype(float))
    print(f"cluster_index:{cluster_index},label:{np.sum(label_list)},res_tuple:{res_tuple}")
    best_threshold_list.append(res_tuple[0])
    # A NaN F1 means this cluster produced no usable curve; record zeros and a
    # zero weight so it drops out of the weighted means below.
    if np.isnan(res_tuple[3]):
        best_precision_list.append(0)
        best_recall_list.append(0)
        f1score_list.append(0)
        length_list.append(0)
    else:
        best_precision_list.append(res_tuple[1])
        best_recall_list.append(res_tuple[2])
        f1score_list.append(res_tuple[3])
        length_list.append(len(score_list))

# Per-cluster report: precision, recall, F1, chosen threshold, and size.
# NOTE(review): 477 is presumably the number of score points per machine, so
# l // 477 would be the machine count — TODO confirm against the data layout.
for index, (threshold, p, r, f, l) in enumerate(
        zip(best_threshold_list, best_precision_list, best_recall_list, f1score_list, length_list)):
    print(f"cluster:{index},p:{round(p, 4)},r:{round(r, 4)},f:{round(f, 4)}, threshold:{round(threshold, 4)}, length:{l//477}")

# Length-weighted averages across clusters (NaN clusters carry zero weight).
print(
    f"mean,p:{get_mean_res(best_precision_list, length_list)},r:{get_mean_res(best_recall_list, length_list)},f:{get_mean_res(f1score_list, length_list)}")

# Total machine count under the same per-machine-length assumption as above.
print(np.sum(length_list)//477)
# Persist the per-cluster best-F1 thresholds for downstream detection.
np.save(threshold_path, np.array(best_threshold_list))
