import pandas as pd
import numpy as np
import os
import pickle
from datetime import datetime, timedelta
from tqdm import tqdm

class SpanNode:
    """A single span in a trace call tree.

    Nodes are linked into a tree via add_child(); dfs_paths_with_durations()
    enumerates every path that starts at this node together with the sum of
    span durations along that path.
    """

    def __init__(self, span_id, pod_name, operation_name, duration):
        self.span_id = span_id
        self.pod_name = pod_name            # e.g. 'service-xyz-123'; service name is the prefix before the first '-'
        self.operation_name = operation_name
        self.duration = duration            # span latency, in whatever unit the input data uses
        self.children = []                  # child SpanNode objects, in insertion order
        self.parent = None                  # back-pointer, set by add_child()

    def add_child(self, child_node):
        """Attach child_node under this node and record its parent back-pointer."""
        child_node.parent = self
        self.children.append(child_node)

    def __str__(self) -> str:
        return f'{self.span_id}: has {len(self.children)} children'

    def get_span_name(self):
        """Return 'service/operation', where service is the pod-name prefix
        before the first '-'."""
        service_name = self.pod_name.split('-')[0]
        return f'{service_name}/{self.operation_name}'

    def dfs_paths_with_durations(self):
        """Return [(path, duration_sum), ...] for every path rooted at this
        node, in depth-first pre-order.

        Each path is a list of SpanNode objects from this node down to some
        descendant (inclusive); duration_sum is the total duration of the
        nodes on that path.
        """
        all_paths_with_durations = []
        self._dfs_paths_with_durations_helper([], all_paths_with_durations, 0)
        return all_paths_with_durations

    def _dfs_paths_with_durations_helper(self, current_path, all_paths_with_durations, current_duration_sum):
        # Add this node to the shared path and extend the running duration sum.
        current_path.append(self)
        current_duration_sum += self.duration

        # Record a snapshot of the current path — the list is mutated during
        # traversal, so it must be copied — together with its duration sum.
        all_paths_with_durations.append((list(current_path), current_duration_sum))

        # Recurse into children. current_duration_sum is a plain number passed
        # by value, so each child call sees its own copy; no rollback needed
        # (the original's trailing decrement was a no-op and has been removed).
        for child in self.children:
            child._dfs_paths_with_durations_helper(current_path, all_paths_with_durations, current_duration_sum)

        # Backtrack: only the shared path list needs restoring.
        current_path.pop()
        
class SpanTree:
    """Lightweight container pairing a trace id with the root SpanNode of its
    reconstructed call tree."""

    def __init__(self, trace_id, root=None):
        self.trace_id = trace_id  # TraceID this tree was built from
        self.root = root          # root SpanNode, or None until set

    def set_root(self, root):
        """Replace the tree's root node."""
        self.root = root

def process_single_trace(trace_byid_dict, trace_id, call_dict):
    """Rebuild the call tree of one trace and accumulate, into call_dict,
    the duration sum of every root-to-node call path.

    Args:
        trace_byid_dict: mapping trace_id -> DataFrame of that trace's spans
            (columns used: SpanID, ParentID, PodName, OperationName, Duration).
        trace_id: key into trace_byid_dict.
        call_dict: mapping 'svc/op#svc/op#...' call-path string -> list of
            observed duration sums; mutated in place and returned.

    Returns:
        call_dict (unchanged when the trace has no span with ParentID 'root').
    """
    trace_demo = trace_byid_dict[trace_id]
    # Walk the spans bottom-up (reverse the file order).
    traces = trace_demo.iloc[::-1].reset_index(drop=True)

    # One SpanNode per SpanID, preserving the bottom-up encounter order.
    nodes = {}
    for span_id, span in traces.groupby('SpanID', sort=False):
        nodes[span_id] = SpanNode(
            span_id=span_id,
            pod_name=span['PodName'].values[0],
            operation_name=span['OperationName'].values[0],
            duration=span['Duration'].values[0],
        )

    # Index children by ParentID once (O(n)) instead of re-scanning every span
    # group for every parent (previously an O(n^2) nested groupby). Iterating
    # the default sorted groupby here reproduces the original child order
    # (ascending SpanID).
    children_of = {}
    for child_id, child_rows in trace_demo.groupby('SpanID'):
        children_of.setdefault(child_rows['ParentID'].values[0], []).append(child_id)

    root_node = None
    for span_id, span in traces.groupby('SpanID', sort=False):
        span_node = nodes[span_id]
        if span['ParentID'].values[0] == 'root':
            root_node = span_node
        for child_id in children_of.get(span_id, []):
            span_node.add_child(nodes[child_id])

    # A trace with no 'root' parent marker cannot be traversed.
    if root_node is None:
        return call_dict

    # Accumulate the duration sum of every DFS path, keyed by its call chain.
    for path, duration_sum in root_node.dfs_paths_with_durations():
        call_chain = '#'.join(node.get_span_name() for node in path)
        if not call_chain:
            continue
        call_dict.setdefault(call_chain, []).append(float(duration_sum))
    return call_dict

def get_trace_calls(trace_byid_dict, trace_id):
    """Rebuild one trace's call tree and return the mean duration sum per
    call path.

    Args:
        trace_byid_dict: mapping trace_id -> DataFrame of that trace's spans
            (columns used: SpanID, ParentID, PodName, OperationName, Duration).
        trace_id: key into trace_byid_dict.

    Returns:
        dict mapping 'svc/op#svc/op#...' call-path string -> average duration
        sum over the trace's DFS paths, or None when the trace has no span
        with ParentID 'root'.
    """
    trace_demo = trace_byid_dict[trace_id]
    # Bottom-up span order, matching process_single_trace().
    traces = trace_demo.iloc[::-1].reset_index(drop=True)

    # One SpanNode per SpanID, preserving the bottom-up encounter order.
    nodes = {}
    for span_id, span in traces.groupby('SpanID', sort=False):
        nodes[span_id] = SpanNode(
            span_id=span_id,
            pod_name=span['PodName'].values[0],
            operation_name=span['OperationName'].values[0],
            duration=span['Duration'].values[0],
        )

    # O(n) parent -> children index; sorted groupby reproduces the original
    # child attachment order (ascending SpanID).
    children_of = {}
    for child_id, child_rows in trace_demo.groupby('SpanID'):
        children_of.setdefault(child_rows['ParentID'].values[0], []).append(child_id)

    root_node = None
    for span_id, span in traces.groupby('SpanID', sort=False):
        span_node = nodes[span_id]
        if span['ParentID'].values[0] == 'root':
            root_node = span_node
        for child_id in children_of.get(span_id, []):
            span_node.add_child(nodes[child_id])

    if root_node is None:
        return None

    # Collect every DFS path's duration sum, then average per call chain.
    trace_call_dict = {}
    for path, duration_sum in root_node.dfs_paths_with_durations():
        call_chain = '#'.join(node.get_span_name() for node in path)
        if not call_chain:
            continue
        trace_call_dict.setdefault(call_chain, []).append(float(duration_sum))
    return {
        call_path: sum(response_arr) / len(response_arr)
        for call_path, response_arr in trace_call_dict.items()
    }

def get_train_call_dict(start, end, data_dir='/home/dds/yjq/data'):
    """Build the training call-path index from per-minute trace CSV files.

    Reads one '<date>/trace/<HH_MM>_trace.csv' file per minute in the
    inclusive [start, end] window (missing files are silently skipped),
    groups the spans by TraceID, and aggregates the duration sums of every
    observed call path across all traces.

    Args:
        start, end: inclusive datetime range, stepped in 1-minute increments.
        data_dir: root directory of the trace-file layout (new parameter;
            defaults to the original hard-coded path, so callers are
            unaffected).

    Returns:
        (call_path_index, call_path_index_dict): the list of call-path
        strings, and a dict mapping each path to [mean, std] of its
        observed duration sums.
    """
    data = []
    current_time = start
    while current_time <= end:
        file = (f'{data_dir}/{datetime.strftime(current_time, "%Y-%m-%d")}/trace/'
                f'{datetime.strftime(current_time, "%H_%M")}_trace.csv')
        current_time = current_time + timedelta(minutes=1)
        if os.path.exists(file):
            data.append(pd.read_csv(file))
    # NOTE(review): pd.concat raises ValueError when no file in the window
    # existed — unchanged from the original behavior.
    total_data = pd.concat(data)

    # Keep the file order of traces (sort=False) while indexing by TraceID.
    trace_byid_dict = {}
    trace_ids = []
    for trace_id, spans in total_data.groupby('TraceID', sort=False):
        trace_byid_dict[trace_id] = spans
        trace_ids.append(trace_id)
    print(f'训练数据共有：{len(trace_ids)}条trace')

    call_dict = {}
    for trace_id in tqdm(trace_ids):
        call_dict = process_single_trace(trace_byid_dict, trace_id, call_dict)

    # Per call path: [mean, std] of all observed duration sums.
    call_path_index_dict = {}
    for call_path, response_arr in call_dict.items():
        responses = np.array(response_arr)
        call_path_index_dict[call_path] = [responses.mean(), responses.std()]
    call_path_index = list(call_path_index_dict.keys())
    return call_path_index, call_path_index_dict

def transform_trace_to_stv(call_path_index, trace_byid_dict, trace_id):
    """Encode one trace as an STV (service trace vector).

    The vector has one slot per known call path in call_path_index; each slot
    holds that path's average duration sum in this trace, or 0 when the path
    does not occur. Call paths of the trace that are absent from the index
    are ignored.

    Returns:
        list of len(call_path_index), or None when the trace has no root span.
    """
    trace_call_dict_avg = get_trace_calls(trace_byid_dict, trace_id)
    if trace_call_dict_avg is None:
        return None
    # Build the position lookup once (first occurrence wins, matching
    # list.index) instead of an O(n) list.index call per trace path.
    index_of = {}
    for i, call_path in enumerate(call_path_index):
        index_of.setdefault(call_path, i)
    stv = [0] * len(call_path_index)
    for trace_call, response in trace_call_dict_avg.items():
        index = index_of.get(trace_call)
        if index is not None:  # call paths unseen at training time are skipped
            stv[index] = response
    return stv

def process_file_to_stv(current_time, call_path_index, output_file):
    """Convert every trace in one minute's CSV file to an STV line and append
    the lines to output_file.

    Each appended line has the form '<trace_id>:<v0>,<v1>,...'. Traces
    without a root span are skipped.

    Args:
        current_time: datetime selecting the '<date>/trace/<HH_MM>_trace.csv'
            input file (path root is hard-coded, as in the rest of this script).
        call_path_index: ordered list of known call paths defining the STV slots.
        output_file: path of the file to append to.
    """
    input_path = (f'/home/dds/yjq/data/{datetime.strftime(current_time, "%Y-%m-%d")}/trace/'
                  f'{datetime.strftime(current_time, "%H_%M")}_trace.csv')
    total_data = pd.read_csv(input_path)

    trace_byid_dict = {}
    trace_ids = []
    for trace_id, spans in total_data.groupby('TraceID', sort=False):
        trace_byid_dict[trace_id] = spans
        trace_ids.append(trace_id)
    print(f'stv数据共有：{len(trace_ids)}条trace')

    # Open the output once in append mode instead of reopening it per trace.
    with open(output_file, 'a') as out:
        for trace_id in tqdm(trace_ids):
            stv = transform_trace_to_stv(call_path_index, trace_byid_dict, trace_id)
            if stv is None:
                continue
            # One line per trace: '<trace_id>:<comma-separated vector>'.
            out.write(f'{trace_id}:{",".join(map(str, stv))}\n')

if __name__ == "__main__":
    # trainticket dataset time window (inclusive, minute granularity).
    window_start = datetime(2025, 1, 9, 16, 40, 0)
    window_end = datetime(2025, 1, 9, 17, 10, 0)

    # To (re)build and persist the call-path index, uncomment:
    # call_path_index, call_path_index_dict = get_train_call_dict(window_start, window_end)
    # with open("idx.pkl", "wb") as f:
    #     pickle.dump(call_path_index_dict, f)

    # Load the previously pickled call-path index and recover slot order.
    with open("idx.pkl", "rb") as f:
        call_path_index_dict = pickle.load(f)
    call_path_index = list(call_path_index_dict.keys())

    # Transform every minute of the window into STV lines.
    output_file = "/home/dds/yjq/TraceAnomaly/tt/train"
    minute = window_start
    while minute <= window_end:
        process_file_to_stv(minute, call_path_index, output_file)
        minute = minute + timedelta(minutes=1)