import pandas as pd
import json, time
import os
from tqdm import tqdm
import subprocess
import numpy as np
from data.chaos_mesh_dir.query_chaos_mesh import export_chaos_mesh_data
from data.trace_dir.query_trace import export_trace_data
from sklearn.metrics import precision_score, recall_score, f1_score


def get_params():
    """Parse the experiment time window from the command line.

    Returns:
        tuple[int, int]: ``(start_time, end_time)`` as integer timestamps
        taken from ``--start_time`` / ``--end_time``.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--start_time", type=int)
    parser.add_argument("--end_time", type=int)
    args = parser.parse_args()
    return args.start_time, args.end_time


def gen_train(trace_df: pd.DataFrame, anomaly_df: pd.DataFrame):
    """Build the training file from traces that contain no anomalous span.

    Each clean trace becomes one line ``trace_id:v1,v2,...`` in
    ``platform/train``, where the vector holds the mean span duration per
    call path (0 when the path is absent from the trace).  The sorted
    call-path index is saved to ``platform/idx.json`` so ``gen_test`` can
    reproduce the same feature order.
    """
    save_dir = "platform"
    # exist_ok avoids the race between an exists() check and mkdir()
    os.makedirs(save_dir, exist_ok=True)
    # raw timestamps are in microseconds; reduce to seconds to compare
    # against the anomaly windows below
    trace_df["timestamp"] = trace_df["timestamp"].apply(lambda x: int(x / 1000000))
    trace_df["label"] = 0
    # self-join children onto parents via span ids to recover
    # caller->callee edges
    meta_df = trace_df.loc[:, ["cmdb_id", "parent_span"]].rename(
        columns={"cmdb_id": "ccmdb_id", "parent_span": "span_id"}
    )
    trace_df = pd.merge(trace_df, meta_df, on=["span_id"])
    trace_df["call_path"] = trace_df["cmdb_id"] + "->" + trace_df["ccmdb_id"]
    call_paths = sorted(set(trace_df["call_path"].tolist()))
    # mark every span whose timestamp falls inside a ground-truth window
    for _, case in anomaly_df.iterrows():
        trace_df.loc[
            (trace_df["timestamp"] >= case["st_time"])
            & (trace_df["timestamp"] <= case["ed_time"]),
            "label",
        ] = 1
    normals = []
    pb = tqdm(total=len(trace_df), desc="For train data")
    for trace_id, group in trace_df.groupby("trace_id"):
        pb.update(len(group))
        # a single labeled span taints the whole trace; keep clean ones only
        if group["label"].sum() > 0:
            continue
        # one groupby per trace instead of one query() scan per call path
        means = group.groupby("call_path")["duration"].mean()
        vec = [means.get(call_path, 0) for call_path in call_paths]
        normals.append(f"{trace_id}:{','.join(map(str, vec))}")
    pb.close()

    with open(f"{save_dir}/train", "w", encoding="utf8") as w:
        w.write("\n".join(normals))

    with open(f"{save_dir}/idx.json", "w", encoding="utf8") as w:
        json.dump(call_paths, w)


def gen_test(trace_df: pd.DataFrame, anomaly_df: pd.DataFrame):
    """Build the test files split by anomaly label.

    Traces overlapping a ground-truth window go to ``platform/test_abnormal``,
    the rest to ``platform/test_normal``.  Feature vectors use the call-path
    order written by ``gen_train`` to ``platform/idx.json``.
    """
    save_dir = "platform"
    # raw timestamps are in microseconds; reduce to seconds to compare
    # against the anomaly windows below
    trace_df.loc[:, "timestamp"] = trace_df["timestamp"].apply(
        lambda x: int(x / 1000000)
    )
    trace_df["label"] = 0
    # self-join children onto parents via span ids to recover
    # caller->callee edges
    meta_df = trace_df.loc[:, ["cmdb_id", "parent_span"]].rename(
        columns={"cmdb_id": "ccmdb_id", "parent_span": "span_id"}
    )
    trace_df = pd.merge(trace_df, meta_df, on=["span_id"])
    trace_df["call_path"] = trace_df["cmdb_id"] + "->" + trace_df["ccmdb_id"]

    # reuse the feature order fixed at training time
    with open(f"{save_dir}/idx.json", "r", encoding="utf8") as r:
        call_paths = json.load(r)

    for _, case in anomaly_df.iterrows():
        trace_df.loc[
            (trace_df["timestamp"] >= case["st_time"])
            & (trace_df["timestamp"] <= case["ed_time"]),
            "label",
        ] = 1
    normals = []
    anomals = []
    pb = tqdm(total=len(trace_df), desc="For test data")
    for trace_id, group in trace_df.groupby("trace_id"):
        # one groupby per trace instead of one query() scan per call path
        means = group.groupby("call_path")["duration"].mean()
        vec = [means.get(call_path, 0) for call_path in call_paths]
        line = f"{trace_id}:{','.join(map(str, vec))}"
        # any labeled span makes the whole trace anomalous
        if group["label"].sum() > 0:
            anomals.append(line)
        else:
            normals.append(line)
        pb.update(len(group))
    pb.close()

    with open(f"{save_dir}/test_normal", "w", encoding="utf8") as w:
        w.write("\n".join(normals))
    with open(f"{save_dir}/test_abnormal", "w", encoding="utf8") as w:
        w.write("\n".join(anomals))


def get_range_proba(proba, label):
    """Point-adjust anomaly scores over contiguous labeled segments.

    Within each maximal run of ``label == 1``, every score is replaced by the
    run's maximum score; scores in normal runs are left untouched.

    Args:
        proba: sequence of anomaly scores.
        label: sequence of 0/1 ground-truth labels, same length as ``proba``.

    Returns:
        np.ndarray: the adjusted score array (input is not modified).
    """
    # Convert up front: with plain Python lists, ``label[1:] != label[:-1]``
    # is a single bool (list comparison), which silently produces wrong
    # split points below. evaluation() passes lists, so this matters.
    proba = np.asarray(proba)
    label = np.asarray(label)
    # indices where the label flips 0<->1, i.e. segment boundaries
    splits = np.where(label[1:] != label[:-1])[0] + 1

    is_anomaly = label[0] == 1
    new_proba = np.array(proba)  # copy so the caller's scores stay intact
    pos = 0
    for sp in splits:
        if is_anomaly:
            new_proba[pos:sp] = np.max(proba[pos:sp])
        is_anomaly = not is_anomaly
        pos = sp
    # the final segment runs to the end of the series
    sp = len(label)
    if is_anomaly:
        new_proba[pos:sp] = np.max(proba[pos:sp])
    return new_proba


def evaluation(true_label, score):
    """Sweep thresholds over point-adjusted scores and pick the best F1.

    Thresholds 0.00 .. 0.99 (step 0.01) are tried; precision/recall/F1 are
    in percent.

    Args:
        true_label: sequence of 0/1 ground-truth labels.
        score: sequence of raw anomaly scores, same length.

    Returns:
        tuple: ``(precision, recall, f1, predict_label)`` at the best-F1
        threshold, where ``predict_label`` is an int32 ndarray of 0/1.
    """
    # Arrays, not lists: get_range_proba's change-point detection relies on
    # element-wise comparison of the label sequence.
    true_label = np.asarray(true_label)
    score = np.asarray(score)

    score = get_range_proba(score, true_label)
    pre = []
    rec = []
    fsc = []
    for item in range(0, 100, 1):
        thr = item / 100
        predict_label = np.array(score >= thr, dtype=np.int32)
        precision = precision_score(true_label, predict_label) * 100
        recall = recall_score(true_label, predict_label) * 100
        # manual harmonic mean so the all-zero case yields 0, not NaN
        if precision + recall == 0:
            f_score = 0
        else:
            f_score = 2 * precision * recall / (precision + recall)
        pre.append(precision)
        rec.append(recall)
        fsc.append(f_score)

    # fsc is indexed by threshold*100, so a single argmax (first maximum,
    # matching list.index semantics) gives score, precision, recall and
    # threshold without three linear scans
    best_idx = int(np.argmax(fsc))
    best_score = fsc[best_idx]
    best_pre = pre[best_idx]
    best_rec = rec[best_idx]
    best_thr = best_idx / 100

    predict_label = np.array(score >= best_thr, dtype=np.int32)
    return best_pre, best_rec, best_score, predict_label


def run_experiment(test_df, anomaly_df):
    """Run the end-to-end detection experiment on *test_df*.

    Generates the test feature files, shells out to the ``traceanomaly``
    model, evaluates its per-trace scores, and aggregates predictions and
    labels per timestamp (one-second buckets).

    Returns:
        dict: ``{"predict": [[start_ts, end_ts, pred, label], ...],
        "metric": [precision, recall, f1]}`` with metrics in percent.
    """
    gen_test(test_df, anomaly_df)
    print("ok")
    # NOTE(review): shell=True with a fixed command string; nothing here is
    # user-controlled, but a list argument with shell=False would be safer.
    subprocess.run(
        "python -m traceanomaly.main --trainpath platform/train --normalpath platform/test_normal --abnormalpath platform/test_abnormal --outputpath result",
        shell=True,
    )
    print("ok")
    # NOTE(review): --outputpath above is "result" but the CSV is read from
    # ./webankdata/rnvp_result.csv — presumably traceanomaly.main writes
    # there regardless of the flag; confirm against that module.
    result_path = "./webankdata/rnvp_result.csv"
    print("ok")
    result = pd.read_csv(result_path)
    # threshold the scores at the best-F1 point; pred is per-trace 0/1
    _, _, _, pred = evaluation(result["label"].tolist(), result["score"].tolist())
    result["pred"] = pred
    id2pred = {case["id"]: case["pred"] for _, case in result.iterrows()}
    id2label = {case["id"]: case["label"] for _, case in result.iterrows()}

    results = []
    preds = []
    labels = []
    test_df = test_df.sort_values("timestamp")

    def detect(trace_id):
        # traces the model never scored default to "normal"
        if id2pred.get(trace_id, None) is None:
            return 0
        else:
            return id2pred[trace_id]

    def ann(trace_id):
        # same default for traces missing from the ground-truth mapping
        if id2label.get(trace_id, None) is None:
            return 0
        else:
            return id2label[trace_id]

    # aggregate per timestamp: a bucket is anomalous if any trace in it is
    for timestamp, case in test_df.groupby("timestamp"):
        _preds = case["trace_id"].apply(lambda x: detect(x)).tolist()
        _labels = case["trace_id"].apply(lambda x: ann(x)).tolist()
        _pred = int(sum(_preds, 0) > 0)
        _label = int(sum(_labels, 0) > 0)
        results.append(
            [
                timestamp,
                timestamp + 1,
                _pred,
                _label,
            ]
        )
        preds.append(_pred)
        labels.append(_label)

    precision = precision_score(labels, preds) * 100
    recall = recall_score(labels, preds) * 100
    # manual F1 so the all-zero case yields 0 instead of dividing by zero
    if precision + recall == 0:
        f_score = 0
    else:
        f_score = 2 * precision * recall / (precision + recall)

    return {
        "predict": results,
        "metric": [precision, recall, f_score],
    }


if __name__ == "__main__":
    # Entry point: export data for the requested window, run the experiment,
    # and always write a status report to ./result/result.json.
    data = {"result": "", "status": "error"}
    try:
        st_time, ed_time = get_params()
        # export data for the requested window
        export_chaos_mesh_data(st_time, ed_time)
        export_trace_data(st_time, ed_time)

        trace_df = pd.read_csv("./data/trace.csv")
        anomaly_df = pd.read_csv("./data/ground_truth.csv").rename(
            columns={"起始时间戳": "st_time", "截止时间戳": "ed_time"}
        )
        # get result (only the last 2000 spans are used as the test slice)
        data["result"] = run_experiment(trace_df.tail(2000), anomaly_df)
        data["status"] = "success"
    except Exception as e:
        # best-effort reporting: the error is recorded in the result file
        # instead of crashing the container
        import traceback

        traceback.print_exc()
        print("Exception!!!!", e)
        data["result"] = repr(e)
    finally:
        # the output directory may not exist yet; without this the write
        # below raises inside finally and the report is lost entirely
        os.makedirs("./result", exist_ok=True)
        with open("./result/result.json", "w", encoding="utf-8") as f:
            json.dump(data, f)
        # keep-alive loop — presumably so the hosting container stays up
        # for result collection; confirm before removing
        while 1:
            time.sleep(1)
