
from datetime import datetime
import pandas as pd
import numpy as np
import os
import pickle
from datetime import datetime,timedelta

def get_service_operation_name(span):
    """Build a ``service_operation`` identifier from one span record.

    The service name is the first dash-separated token of ``PodName``;
    the operation name is the last slash-separated token of
    ``OperationName`` (e.g. "frontend-7d8f" + "/x.Svc/GetCart"
    -> "frontend_GetCart").
    """
    service = span["PodName"].partition("-")[0]
    operation = span["OperationName"].rsplit("/", 1)[-1]
    return f"{service}_{operation}"

def trace_anomaly_detect(single_trace_operations, slo):
    """Detect whether a single trace is anomalous.

    A trace is anomalous when its observed total duration exceeds the
    expected duration, computed as the sum over all operations of
    ``count * (mean + 1.5 * std)`` taken from the per-operation SLO.

    Args:
        single_trace_operations (dict): one entry of operation_count —
            operation name -> occurrence count, plus a special
            "duration" key holding the trace's total raw duration.
        slo (dict): operation name -> [mean_ms, std_ms].

    Returns:
        bool: True if anomalous, False otherwise.
    """
    # Raw duration is divided by 1000 so it is in the same unit (ms) as
    # the SLO values, which get_operation_slo also divides by 1000.
    real_duration = float(single_trace_operations["duration"]) / 1000.0
    expect_duration = sum(
        count * (slo[operation][0] + 1.5 * slo[operation][1])
        for operation, count in single_trace_operations.items()
        if operation != "duration"
    )
    # Original spelled this as `if ...: return True else: return False`.
    return real_duration > expect_duration


def trace_list_partition(operation_count, slo):
    """Partition traces into abnormal and normal trace-id lists.

    Args:
        operation_count (dict): trace_id -> per-operation counts plus a
            "duration" key, as produced by get_operation_duration_data.
        slo (dict): operation name -> [mean_ms, std_ms].

    Returns:
        (list, list): abnormal trace-id list, normal trace-id list.
    """
    normal_list = []  # normal traceid list
    abnormal_list = []  # abnormal traceid list
    for traceid, operations in operation_count.items():
        # trace_anomaly_detect returns True for an ANOMALOUS trace; the
        # original code bound it to a variable named `normal`, which was
        # misleading (behavior was still correct).
        is_abnormal = trace_anomaly_detect(
            single_trace_operations=operations, slo=slo
        )
        if is_abnormal:
            abnormal_list.append(traceid)
        else:
            normal_list.append(traceid)

    return abnormal_list, normal_list

def init_operation_dict(operation_list, operation_dict, trace_id):
    """Ensure ``operation_dict[trace_id]`` exists, with every operation
    counter and the "duration" accumulator initialised to zero.

    No-op when the trace id is already present (existing counts are kept).
    """
    if trace_id in operation_dict:
        return
    counters = dict.fromkeys(operation_list, 0)
    counters["duration"] = 0
    operation_dict[trace_id] = counters


def get_span_list(start_time=None, end_time=None, detect_time=None):
    """Load span records from per-minute trace CSV files.

    Online mode (``detect_time`` is None): concatenate every minute file
    between ``start_time`` and ``end_time`` (inclusive), stepping one
    minute at a time and skipping missing files.
    Offline mode: load the single CSV for ``detect_time``.

    Returns:
        pd.DataFrame: the loaded spans; empty when no file is found.
    """
    data_root = "/home/dds/yjq/data/"

    def _trace_path(ts):
        # Files are laid out as <root>/<YYYY-MM-DD>/trace/<HH_MM>_trace.csv
        return data_root + datetime.strftime(ts, "%Y-%m-%d/trace/%H_%M") + "_trace.csv"

    if detect_time is None:  # was `== None` — identity check is the idiom
        frames = []
        while start_time <= end_time:
            trace_file = _trace_path(start_time)
            start_time = start_time + timedelta(minutes=1)
            if os.path.exists(trace_file):
                frames.append(pd.read_csv(trace_file))
        # pd.concat raises ValueError on an empty list (e.g. no file in
        # the whole range); fall back to an empty frame instead.
        span_list = pd.concat(frames, axis=0) if frames else pd.DataFrame()
    else:
        trace_file = _trace_path(detect_time)
        if os.path.exists(trace_file):
            span_list = pd.read_csv(trace_file)
        else:
            span_list = pd.DataFrame()
    return span_list



def get_service_operation_list(span_list):
    """Collect the distinct service_operation names (e.g. "Currencyservice_Convert").

    Order of first appearance is preserved.

    Args:
        span_list (pd.DataFrame): span records with PodName / OperationName
            columns.

    Returns:
        list: unique service_operation names.
    """
    seen = set()
    service_operation_list = []
    for _, row in span_list.iterrows():
        service_operation = get_service_operation_name(row)
        # Membership test against a set instead of the result list keeps
        # this O(n) overall (the original `not in list` was O(n) per span).
        if service_operation not in seen:
            seen.add(service_operation)
            service_operation_list.append(service_operation)

    return service_operation_list


def get_operation_slo(service_operation_list, span_list):
    """Compute a per-operation SLO of [mean, std] in milliseconds.

    Traces whose first span's Duration exceeds 1e8 are discarded as
    outliers before aggregating.

    Args:
        service_operation_list (list): operation names to aggregate.
        span_list (pd.DataFrame): span records with TraceID / Duration
            columns.

    Returns:
        dict: operation -> [mean_ms, std_ms], each rounded to 4 decimals.
    """
    duration_dict = {operation: [] for operation in service_operation_list}

    for _, trace in span_list.groupby("TraceID", sort=False):
        # Skip traces with an implausibly large root duration (outlier
        # filter). The original also cached every kept trace DataFrame in
        # an unused `traces` dict — dead weight, removed.
        if trace.iloc[0]["Duration"] > 100000000:
            continue
        for _, span in trace.iterrows():
            duration_dict[get_service_operation_name(span)].append(span["Duration"])

    # NOTE(review): an operation with zero samples yields nan here
    # (np.mean of an empty list) — confirm that cannot occur upstream.
    operation_slo = {
        operation: [
            round(np.mean(durations) / 1000.0, 4),
            round(np.std(durations) / 1000.0, 4),
        ]
        for operation, durations in duration_dict.items()
    }

    return operation_slo


def get_operation_duration_data(service_operation_list, span_list):
    """Count per-trace operation occurrences and accumulate total duration,
    to feed anomaly detection.

    Args:
        service_operation_list (list): all known operation names.
        span_list (pd.DataFrame): span records with TraceID / Duration
            columns.

    Return:
       {
          traceid: {
              operation1: 1
              operation2: 2
              duration: 1000
          }
       }
    """
    operation_dict = {}
    for trace_id, trace in span_list.groupby("TraceID", sort=False):
        init_operation_dict(service_operation_list, operation_dict, trace_id)
        per_trace = operation_dict[trace_id]
        for _, span in trace.iterrows():
            per_trace[get_service_operation_name(span)] += 1
            per_trace["duration"] += span["Duration"]

    return operation_dict

if __name__ == "__main__":
    # Load the precomputed per-operation SLO ([mean_ms, std_ms] per operation).
    # NOTE(review): pickle.load is only safe on trusted files — this path is
    # local and presumably produced by this project; confirm.
    with open("/root/yjq/TraceAnomaly/slo.pkl", "rb") as f:
        operation_slo,_ = pickle.load(f)
    print(operation_slo)
    # Offline test: read the span CSV for one specific minute.
    span_list_suffering = get_span_list(detect_time=datetime(2024,10,5,2,4,0))
    service_operation_list = get_service_operation_list(span_list_suffering)
    # Per-trace operation counts + total duration for that minute.
    operation_dict = get_operation_duration_data(
        service_operation_list, span_list_suffering
    )
    # Split trace ids into abnormal / normal against the loaded SLO.
    abnormal_trace_list, normal_trace_list = trace_list_partition(
        operation_dict, operation_slo
    )
    # print(f"abnormal trace number: {len(abnormal_trace_list)}")
    # print(f"normal trace number: {len(normal_trace_list)}")




