import json, os
import pandas as pd, numpy as np
from datetime import datetime, timezone
import logging
from collections import defaultdict

# Print numpy floats without scientific notation.
# NOTE(review): no arrays appear to be printed in this chunk — possibly leftover
# configuration; confirm it is still needed.
np.set_printoptions(suppress=True)


def date_to_timestamp(dt_str):
    """Parse a "YYYY-mm-dd HH:MM:SS" string, interpret it as UTC, and return the Unix timestamp."""
    parsed = datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
    return int(parsed.replace(tzinfo=timezone.utc).timestamp())


def find_nearest_ground_truth(ts, ground_truth, extended_time_window=60):
    """Return the first ground-truth row whose padded injection window contains ts.

    The window for each row spans from ``start_time(utc) - extended_time_window``
    to ``start_time(utc) + duration + extended_time_window`` (duration strings
    look like "600s", so the trailing unit character is stripped).  Returns
    None when no row matches.
    """
    for _, row in ground_truth.iterrows():
        injected_at = row["start_time(utc)"]
        duration_seconds = int(row["duration"][:-1])  # strip the trailing "s"
        window_start = injected_at - extended_time_window
        window_end = injected_at + duration_seconds + extended_time_window
        if window_start <= ts <= window_end:
            return row
    return None


def cal_cumulative_sum(l):
    """Turn l into its running (prefix) sums in place and return the same list."""
    total = 0
    for idx, value in enumerate(l):
        total += value
        l[idx] = total
    return l


def timestamp_to_timestr(timestamp):
    """Convert a Unix timestamp (seconds) to a "YYYY-mm-dd HH:MM:SS" string in UTC.

    Inverse of date_to_timestamp for whole-second values.
    """
    # datetime.fromtimestamp(..., tz=...) replaces datetime.utcfromtimestamp(),
    # which is deprecated since Python 3.12 and returned a naive datetime.
    # (The duplicate mid-file `from datetime import datetime, timezone` was
    # dropped; the module already imports both names at the top of the file.)
    utc_time = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    return utc_time.strftime("%Y-%m-%d %H:%M:%S")

    

def evaluate(candidate_causes_dir, ground_truth_dir, causes_dir, k=5, start_lag=0, end_lag=240):
    """Score root-cause localization results against injected-fault ground truth.

    Parameters
    ----------
    candidate_causes_dir : str
        Path to a JSON file mapping fault_type -> list of acceptable root-cause names.
    ground_truth_dir : str
        Path to a CSV with columns "start_time(utc)" (parseable by date_to_timestamp),
        "duration" (e.g. "600s") and "fault_type".
    causes_dir : str
        Path to a JSON list of localization results; each entry has "TimeStamp"
        (milliseconds) and "top1".."topk" objects with a "root_cause" field of the
        form "name@suffix".
    k : int
        Number of top-ranked candidates scored (produces AC@1..AC@k and Avg@k).
    start_lag : int
        Offset (seconds) added to the fault start time of the matching window.
    end_lag : int
        Offset (seconds) added to the fault end time of the matching window.

    Returns
    -------
    dict
        Dataset name, effective case count, per-fault-type [hits, reports],
        cumulative hit counts, Avg@k and AC@1..AC@k precisions.

    Notes
    -----
    Only the FIRST localization result whose timestamp falls inside a fault's
    window is scored; faults with no matching result are excluded from case_num
    (so precision is over detected cases only).
    """
    with open(candidate_causes_dir) as f1, open(causes_dir) as f2:
        candidate_roots = json.load(f1)
        causes = json.load(f2)
    ground_truth = pd.read_csv(ground_truth_dir)
    # Convert the human-readable start times to Unix timestamps (seconds, UTC).
    ground_truth["start_time(utc)"] = ground_truth["start_time(utc)"].apply(
        date_to_timestamp
    )
    # hit_num[i] counts cases whose first acceptable root cause appeared at rank i.
    hit_num = [0] * (k + 1)
    case_num = len(ground_truth)
    # fault_type -> [hits, reports] for this dataset.
    type_hit_dict = {}

    # Iterate over the injected faults (one ground-truth row each).
    for index in range(len(ground_truth)):
        detected_mark = False
        start_time = ground_truth.loc[index, "start_time(utc)"]
        duration = ground_truth.loc[index, "duration"]
        end_time = start_time + int(duration.replace("s", ""))
        fault_type = ground_truth.loc[index, "fault_type"]
        # Keep only the part before a full-width parenthesis, lower-cased.
        fault_type = fault_type.split("（")[0].lower()
        # Excluded fault types are not scored (NIC packet corruption, kafka
        # interference, interference from another gauss instance).
        if fault_type in ["网卡corrupt", "kafka干扰", "另一个gauss干扰"]:
            case_num -= 1
            continue
        if fault_type not in type_hit_dict:
            type_hit_dict[fault_type] = [0, 0]

        # Scan the localization results for this fault.
        for cause in causes:
            cause_start_time = cause.get("TimeStamp")
            # Only consider the first reported anomaly whose timestamp (ms -> s)
            # falls inside the lag-extended fault injection window.
            if start_time + start_lag <= cause_start_time // 1000 <= end_time + end_lag:
                # Count one report for this fault type.
                type_hit_dict[fault_type][1] += 1
                for i in range(1, k + 1):
                    # "root_cause" looks like "name@suffix"; match on the name part.
                    if (
                        cause.get("top" + str(i)).get("root_cause").split("@")[0]
                        in candidate_roots[fault_type]
                    ):
                        print("="*20)
                        print(f"timestr: {timestamp_to_timestr(cause_start_time // 1000)}")
                        print(f"predict: {cause.get('top' + str(i)).get('root_cause')}")
                        print(f"groundtruth: {fault_type}")
                        hit_num[i] += 1
                        # Count one hit for this fault type.
                        type_hit_dict[fault_type][0] += 1
                        break
                detected_mark = True
                break
        # Undetected faults are removed from the denominator.
        if detected_mark is False:
            case_num -= 1

    logging.info(f"hit_num: {hit_num}")
    # Convert per-rank hits into cumulative hits (hit within top-i).
    hit_num = cal_cumulative_sum(hit_num)
    logging.info(f"accumulate hit_num: {hit_num}")
    if case_num == 0:
        # Avoid division by zero; hit_num is all zeros here anyway.
        precisions = hit_num
    else:
        precisions = list(map(lambda x: round(x / case_num, 4), hit_num))

    output = {
        "Dataset": causes_dir.split(os.path.sep)[-1][:-5],
        "case_num": case_num,
        "type_hit": type_hit_dict,
        "hit_num": hit_num,
        f"Avg@{k}": round(sum(precisions) / k, 4),
    }
    for i in range(1, k + 1):
        output[f"AC@{i}"] = precisions[i]
    logging.info(f"output: {output}")
    return output


if __name__ == "__main__":
    logging.basicConfig(
        format="%(asctime)s [%(levelname)s]: %(message)s", level=logging.INFO
    )

    candidate_causes_dir = "candidate_roots.json"

    ground_truth_dirs = [
        "dataset/vm2_20230319-04_20230319-19/ground_truth.csv"
    ]
    causes_dirs = [
        "output/vm2_20230319-04_20230319-19/vm2_20230319-04_20230319-19.json"
    ]
    topk = 5

    if len(ground_truth_dirs) != len(causes_dirs):
        raise ValueError("The experimental result does not match the marked quantity")
    outputs = []
    sum_case_num = 0
    sum_hit_num = [0] * (topk + 1)
    # Evaluate each dataset and aggregate case counts / cumulative hit counts.
    for i in range(len(ground_truth_dirs)):
        output = evaluate(candidate_causes_dir, ground_truth_dirs[i], causes_dirs[i], topk)
        outputs.append(output)
        sum_case_num += output["case_num"]
        for j in range(topk + 1):
            sum_hit_num[j] += output["hit_num"][j]
    # Guard against ZeroDivisionError when no case was detected in any dataset
    # (mirrors the case_num == 0 handling inside evaluate(); hit counts are all
    # zero in that situation, so the precisions are zero either way).
    if sum_case_num == 0:
        sum_precision = sum_hit_num
    else:
        sum_precision = list(map(lambda x: round(x / sum_case_num, 4), sum_hit_num))
    sum_output = {
        "case_num": sum_case_num,
        "hit_num": sum_hit_num,
        f"Avg@{topk}": round(sum(sum_precision) / topk, 4)
    }
    for i in range(1, topk + 1):
        sum_output[f"AC@{i}"] = sum_precision[i]
    logging.info(f"sum_output: {sum_output}")
    outputs.append(sum_output)
    # "w" (was "w+") truncates just the same; explicit utf-8 keeps the
    # non-ASCII fault-type keys intact since the JSON is dumped with
    # ensure_ascii=False, regardless of the platform default encoding.
    with open("evaluations.json", "w", encoding="utf-8") as f:
        f.write(json.dumps(outputs, indent=4, ensure_ascii=False))
