import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
import yaml
import argparse
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import StandardScaler
from approach.MLSTM import get_model_MLSTM, get_prediction_MLSTM
import torch
import os
import json, time
from data.metric_dir.query_metric import export_metric_data
from data.chaos_mesh_dir.query_chaos_mesh import export_chaos_mesh_data


def get_params():
    """Parse ``--start_time`` and ``--end_time`` from the command line.

    Returns:
        tuple: (start_time, end_time) as ints, or ``None`` for any flag
        that was not supplied (both arguments are optional to argparse).
    """
    # argparse is already imported at module level; the previous
    # function-local re-import was redundant.
    parser = argparse.ArgumentParser()
    parser.add_argument("--start_time", type=int)
    parser.add_argument("--end_time", type=int)
    args = parser.parse_args()
    return args.start_time, args.end_time


# Global Configuration
# Loaded once at import time; functions below read this module-level dict
# (keys used later: "scaler" and "MLSTM").
with open("config.yml", "r") as f:
    # FullLoader does not build arbitrary Python objects, but prefer
    # yaml.safe_load if config.yml could ever come from an untrusted source.
    config = yaml.load(f, Loader=yaml.FullLoader)
# print(config)

# # parse args
# parser = argparse.ArgumentParser()
# args = parser.parse_args()


def use_minmax_scale(df_data: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of *df_data* with each column rescaled to [0, 1]."""
    scaled = df_data.copy()
    # Assign through .loc so the original index/columns are preserved.
    scaled.loc[:, :] = minmax_scale(scaled)
    return scaled


def use_standard_scale(df_data: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of *df_data* with each column standardized (zero mean, unit variance)."""
    standardized = df_data.copy()
    standardized.loc[:, :] = StandardScaler().fit_transform(standardized)
    return standardized


def load_data(df: pd.DataFrame):
    """Split *df* into a feature matrix, timestamps and labels.

    Expects *df* to carry a ``timestamp`` column and a binary ``label``
    column; every other column is treated as a feature. Features are
    normalized according to the module-level ``config["scaler"]`` setting
    ("minmax", "standard", or anything else for no scaling).

    Returns:
        tuple: (features ndarray, timestamp list, label list).
    """
    timestamps = df["timestamp"].tolist()
    anomaly_labels = df["label"].tolist()
    features = df.set_index("timestamp").drop(columns=["label"])

    scaler_name = config["scaler"]
    if scaler_name == "minmax":
        features = use_minmax_scale(features)
    elif scaler_name == "standard":
        features = use_standard_scale(features)

    return features.to_numpy(), timestamps, anomaly_labels


def train(train_data, params):
    """Fit an MLSTM model on *train_data* and persist it to ``MLSTM.pt``.

    *params* is a dict providing the hyper-parameters consumed below
    (``modal``, ``seq_len``, ``batch_size``, ``epoch``, ``learning_rate``).
    The whole model object is saved, not just a state dict.
    """
    fitted_model = get_model_MLSTM(
        train_data,
        params["modal"],
        params["seq_len"],
        params["batch_size"],
        params["epoch"],
        params["learning_rate"],
    )
    torch.save(fitted_model, "MLSTM.pt")


def test(test_data, win_labels, labels, config):
    """Score *test_data* with the persisted MLSTM model and evaluate.

    Args:
        test_data: feature matrix to score.
        win_labels: window start timestamps, one per scored point.
        labels: ground-truth 0/1 labels, one per scored point.
        config: MLSTM hyper-parameter dict (``seq_len``, ``modal``).

    Returns:
        dict with ``predict`` — (start, end, prediction, label) tuples —
        and ``metric`` — [precision, recall, f1].
    """
    # NOTE(review): torch.load of a full model object; newer torch versions
    # may require weights_only=False here — confirm against the torch in use.
    model = torch.load("MLSTM.pt")
    scores, _dim_scores = get_prediction_MLSTM(
        model, test_data, config["seq_len"], config["modal"]
    )
    precision, recall, f1, predictions = evaluation(labels, scores)
    # Window end = start + 60; assumes 60-second windows — TODO confirm.
    window_ends = [st + 60 for st in win_labels]
    return {
        "predict": list(zip(win_labels, window_ends, predictions, labels)),
        "metric": [precision, recall, f1],
    }


def get_range_proba(proba, label):
    """Point-adjust anomaly scores per contiguous labeled segment.

    Within every maximal run where ``label == 1``, each score is replaced
    by the maximum score of that run, so that detecting any point of an
    anomalous segment credits the whole segment.

    Args:
        proba: per-point anomaly scores (list or ndarray).
        label: per-point 0/1 ground-truth labels (list or ndarray).

    Returns:
        np.ndarray: adjusted copy of *proba* (input is not mutated).
    """
    # BUG FIX: with a plain Python list, ``label[1:] != label[:-1]`` is a
    # single bool (list comparison), so np.where produced garbage segment
    # boundaries. Coerce to an ndarray first for element-wise comparison.
    scores = np.array(proba)
    labels_arr = np.asarray(label)
    if labels_arr.size == 0:
        return scores

    # Indices where the label value changes, i.e. segment boundaries.
    splits = np.where(labels_arr[1:] != labels_arr[:-1])[0] + 1

    is_anomaly = labels_arr[0] == 1
    pos = 0
    for sp in splits:
        if is_anomaly:
            scores[pos:sp] = np.max(scores[pos:sp])
        # Segments alternate between normal and anomalous by construction.
        is_anomaly = not is_anomaly
        pos = sp
    if is_anomaly:
        scores[pos:] = np.max(scores[pos:])
    return scores


def evaluation(true_label, proba):
    """Grid-search a score threshold and report the best-F1 operating point.

    Scores are point-adjusted first (``get_range_proba``) so that hitting
    any point of an anomalous segment counts for the whole segment.
    Thresholds 0.00 .. 0.99 in steps of 0.01 are tried (1.0 is excluded,
    matching the original loop bounds).

    Args:
        true_label: ground-truth 0/1 labels, one per point.
        proba: anomaly scores, one per point.

    Returns:
        tuple: (best_precision, best_recall, best_f1, predict_label) where
        the metrics are percentages and ``predict_label`` is the 0/1
        prediction list at the best threshold.
    """
    from sklearn.metrics import precision_score, recall_score

    proba = np.array(proba)
    new_proba = get_range_proba(proba, true_label)

    pre = []
    rec = []
    fsc = []
    for item in range(0, 100, 1):
        thr = item / 100
        predict_label = np.array(new_proba >= thr, dtype=np.int32)
        # zero_division=0 returns the same value sklearn would default to
        # while suppressing the undefined-metric warning.
        precision = precision_score(true_label, predict_label, zero_division=0) * 100
        recall = recall_score(true_label, predict_label, zero_division=0) * 100
        if precision + recall == 0:
            f_score = 0
        else:
            f_score = 2 * precision * recall / (precision + recall)
        pre.append(precision)
        rec.append(recall)
        fsc.append(f_score)

    # One argmax instead of three O(n) fsc.index() scans; same first-max index.
    best_idx = int(np.argmax(fsc))
    best_score = fsc[best_idx]
    best_pre = pre[best_idx]
    best_rec = rec[best_idx]
    best_thr = best_idx / 100

    predict_label = np.array(new_proba >= best_thr, dtype=np.int32).tolist()
    return best_pre, best_rec, best_score, predict_label


def gen_data(metric_df_list, st_time, ed_time, anomaly_df):
    """Assemble a per-second feature table with ground-truth labels.

    For each metric DataFrame (columns ``timestamp`` and ``value``), one
    column ``col<i>`` is produced covering every second in
    ``[st_time, ed_time)``: the mean value at that timestamp, or the
    metric's overall median when the timestamp is absent. Rows falling
    inside any anomaly window of *anomaly_df* get ``label`` 1, else 0.

    Args:
        metric_df_list: list of metric DataFrames.
        st_time: window start (inclusive), integer epoch seconds.
        ed_time: window end (exclusive), integer epoch seconds.
        anomaly_df: DataFrame with ``st_time``/``ed_time`` anomaly windows.

    Returns:
        pd.DataFrame with columns timestamp, col0..colN, label.
    """
    print(st_time, ed_time)
    timestamps = list(range(int(st_time), int(ed_time), 1))
    columns = {"timestamp": timestamps}
    pb = tqdm(total=len(metric_df_list))
    for index, metric_df in enumerate(metric_df_list):
        median = metric_df["value"].median()
        # PERF FIX: one groupby-mean + reindex is O(n) overall, replacing
        # the previous per-second DataFrame.query (O(n) per timestamp).
        # Missing timestamps become NaN after reindex and are filled with
        # the metric median — identical to the old per-row fallback.
        # Assumes metric timestamps are integer-valued — TODO confirm.
        per_ts_mean = metric_df.groupby("timestamp")["value"].mean()
        columns[f"col{index}"] = per_ts_mean.reindex(timestamps).fillna(median).tolist()
        pb.update()
    pb.close()

    data = pd.DataFrame(columns)
    data["label"] = 0
    for _, case in anomaly_df.iterrows():
        in_window = (data["timestamp"] >= case["st_time"]) & (
            data["timestamp"] <= case["ed_time"]
        )
        data.loc[in_window, "label"] = 1
    return data


if __name__ == "__main__":
    # Result envelope written to ./result/result.json; starts as "error"
    # and is only flipped to "success" if the whole pipeline completes.
    data = {"status": "error", "result": {}}
    try:
        start_time, end_time = get_params()
        # Export raw data for the requested window (chaos-mesh faults + metrics).
        export_chaos_mesh_data(start_time, end_time)
        export_metric_data(start_time, end_time)

        # Load ground truth; rename the Chinese CSV headers
        # ("起始时间戳" = start ts, "截止时间戳" = end ts) to st_time/ed_time.
        anomaly_df = pd.read_csv("./data/ground_truth.csv").rename(
            columns={"起始时间戳": "st_time", "截止时间戳": "ed_time"}
        )

        # One DataFrame per exported metric file, each sorted by timestamp.
        metric_df_list = []
        for dirpath, dirnames, filenames in os.walk(f"./data/metric_data/"):
            for filename in filenames:
                df = pd.read_csv(os.path.join(dirpath, filename))
                df = df.sort_values("timestamp")
                metric_df_list.append(df)

        test_df = gen_data(metric_df_list, start_time, end_time, anomaly_df)
        # test
        test_data, win_labels, labels = load_data(test_df)
        # NOTE(review): the last 10 labels/timestamps are trimmed but
        # test_data is not — presumably compensating for the model's
        # sequence window; confirm against get_prediction_MLSTM.
        data["result"] = test(
            test_data, win_labels[:-10], labels[:-10], config["MLSTM"]
        )
        data["status"] = "success"
    except Exception as e:
        # Best-effort: any failure is reported in the result file rather
        # than crashing the container.
        print(e)
        data["result"] = repr(e)
    finally:
        with open("./result/result.json", "w", encoding="utf-8") as f:
            json.dump(data, f)

        # Keep the process (and its container) alive so the result file
        # can be collected externally.
        while 1:
            time.sleep(1)
