{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据处理 \n",
    "将csv格式的trace转换为TraceAnomaly适合的格式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SpanNode:\n",
    "    \"\"\"A single span in a trace, linked into the trace's call tree.\"\"\"\n",
    "\n",
    "    def __init__(self, span_id, pod_name, operation_name, duration):\n",
    "        self.span_id = span_id\n",
    "        self.pod_name = pod_name\n",
    "        self.operation_name = operation_name\n",
    "        self.duration = duration\n",
    "        self.children = []\n",
    "        self.parent = None\n",
    "\n",
    "    def add_child(self, child_node):\n",
    "        \"\"\"Attach child_node below this node and set its parent pointer.\"\"\"\n",
    "        child_node.parent = self\n",
    "        self.children.append(child_node)\n",
    "\n",
    "    def __str__(self) -> str:\n",
    "        return f'{self.span_id}: has {len(self.children)} children'\n",
    "\n",
    "    def get_span_name(self):\n",
    "        \"\"\"Return 'service/operation'; service is the pod name up to the first '-'.\"\"\"\n",
    "        service_name = self.pod_name.split('-')[0]\n",
    "        operation_name_short = self.operation_name\n",
    "        return f'{service_name}/{operation_name_short}'\n",
    "\n",
    "    def dfs_paths_with_durations(self):\n",
    "        \"\"\"Return [(path, duration_sum)] for every root-to-node prefix path.\"\"\"\n",
    "        all_paths_with_durations = []\n",
    "        self._dfs_paths_with_durations_helper([], all_paths_with_durations, 0)\n",
    "        return all_paths_with_durations\n",
    "\n",
    "    def _dfs_paths_with_durations_helper(self, current_path, all_paths_with_durations, current_duration_sum):\n",
    "        # Add this node to the path and extend the running duration sum.\n",
    "        current_path.append(self)\n",
    "        current_duration_sum += self.duration\n",
    "\n",
    "        # Record a copy of the current path with its cumulative duration.\n",
    "        all_paths_with_durations.append((list(current_path), current_duration_sum))\n",
    "\n",
    "        # Recurse into each child.\n",
    "        for child in self.children:\n",
    "            child._dfs_paths_with_durations_helper(current_path, all_paths_with_durations, current_duration_sum)\n",
    "\n",
    "        # Backtrack: only the shared path list needs undoing. The original\n",
    "        # also subtracted self.duration from current_duration_sum, but that\n",
    "        # was a no-op (the int is local to each call), so it is removed.\n",
    "        current_path.pop()\n",
    "        \n",
    "class SpanTree:\n",
    "    \"\"\"Container tying a trace id to the root of its span call tree.\"\"\"\n",
    "\n",
    "    def __init__(self, trace_id, root = None):\n",
    "        self.root = root\n",
    "        self.trace_id = trace_id\n",
    "\n",
    "    def set_root(self, root):\n",
    "        \"\"\"Replace the tree's root node.\"\"\"\n",
    "        self.root = root\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pickle\n",
    "from datetime import datetime, timedelta\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_single_trace(trace_byid_dict, trace_id, call_dict):\n",
    "    \"\"\"Accumulate per-call-chain duration sums for one trace into call_dict.\n",
    "\n",
    "    Builds the span tree for trace_id, enumerates every root-to-node DFS\n",
    "    path, and appends each path's duration sum to call_dict keyed by the\n",
    "    '#'-joined chain of span names. Returns the updated call_dict.\n",
    "    \"\"\"\n",
    "    trace_demo = trace_byid_dict[trace_id]\n",
    "    # Reverse the rows so spans are visited bottom-up (leaf spans first).\n",
    "    traces = trace_demo.iloc[::-1].reset_index(drop=True)\n",
    "    nodes = {}\n",
    "    root_node = None\n",
    "\n",
    "    # One SpanNode per span; sort=False keeps the bottom-up row order.\n",
    "    for span_id, span in traces.groupby('SpanID', sort=False):\n",
    "        pod_name = span['PodName'].values[0]\n",
    "        operation_name = span['OperationName'].values[0]\n",
    "        duration = span['Duration'].values[0]\n",
    "        nodes[span_id] = SpanNode(span_id=span_id, pod_name=pod_name, operation_name=operation_name, duration=duration)\n",
    "        if span['ParentID'].values[0] == 'root':\n",
    "            root_node = nodes[span_id]\n",
    "\n",
    "    # Link each span to its parent in one pass (the original rescanned all\n",
    "    # spans for every span, O(n^2)). Iterating the default sorted groupby\n",
    "    # attaches children in the same SpanID-sorted order as before.\n",
    "    for span_id_child, span_child in trace_demo.groupby('SpanID'):\n",
    "        parent_id = span_child['ParentID'].values[0]\n",
    "        if parent_id in nodes:\n",
    "            nodes[parent_id].add_child(nodes[span_id_child])\n",
    "\n",
    "    # A trace without a 'root' span cannot be walked; leave call_dict as-is.\n",
    "    if root_node is None:\n",
    "        return call_dict\n",
    "\n",
    "    # Record every DFS prefix path and its cumulative duration.\n",
    "    for path, duration_sum in root_node.dfs_paths_with_durations():\n",
    "        call_chain = '#'.join(node.get_span_name() for node in path)\n",
    "        if call_chain == \"\":\n",
    "            continue\n",
    "        call_dict.setdefault(call_chain, []).append(float(duration_sum))\n",
    "    return call_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_trace_calls(trace_byid_dict, trace_id):\n",
    "    \"\"\"Return {call_chain: mean duration sum} for one trace.\n",
    "\n",
    "    Builds the span tree, enumerates every DFS prefix path, and averages\n",
    "    the duration sums of paths sharing the same call chain. Returns None\n",
    "    when the trace has no 'root' span.\n",
    "    \"\"\"\n",
    "    trace_demo = trace_byid_dict[trace_id]\n",
    "    # Reverse the rows so spans are visited bottom-up (leaf spans first).\n",
    "    traces = trace_demo.iloc[::-1].reset_index(drop=True)\n",
    "    nodes = {}\n",
    "    root_node = None\n",
    "\n",
    "    # One SpanNode per span; sort=False keeps the bottom-up row order.\n",
    "    for span_id, span in traces.groupby('SpanID', sort=False):\n",
    "        pod_name = span['PodName'].values[0]\n",
    "        operation_name = span['OperationName'].values[0]\n",
    "        duration = span['Duration'].values[0]\n",
    "        nodes[span_id] = SpanNode(span_id=span_id, pod_name=pod_name, operation_name=operation_name, duration=duration)\n",
    "        if span['ParentID'].values[0] == 'root':\n",
    "            root_node = nodes[span_id]\n",
    "\n",
    "    # Link spans to parents in one pass (replaces the original O(n^2) scan);\n",
    "    # the sorted groupby preserves the original child-attachment order.\n",
    "    for span_id_child, span_child in trace_demo.groupby('SpanID'):\n",
    "        parent_id = span_child['ParentID'].values[0]\n",
    "        if parent_id in nodes:\n",
    "            nodes[parent_id].add_child(nodes[span_id_child])\n",
    "\n",
    "    if root_node is None:\n",
    "        return None\n",
    "\n",
    "    # Collect duration sums per call chain, then average each chain.\n",
    "    trace_call_dict = {}\n",
    "    for path, duration_sum in root_node.dfs_paths_with_durations():\n",
    "        call_chain = '#'.join(node.get_span_name() for node in path)\n",
    "        if call_chain == \"\":\n",
    "            continue\n",
    "        trace_call_dict.setdefault(call_chain, []).append(float(duration_sum))\n",
    "    trace_call_dict_avg = {call_path: sum(arr) / len(arr) for call_path, arr in trace_call_dict.items()}\n",
    "    return trace_call_dict_avg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_call_dict(start, end):\n",
    "    \"\"\"Build the training call-chain index from per-minute trace CSVs.\n",
    "\n",
    "    Scans every minute in [start, end] and returns (call_path_index,\n",
    "    call_path_index_dict), where the dict maps each call chain to\n",
    "    [mean, std] of its duration sums and the list is the key order used\n",
    "    as the STV feature index.\n",
    "    \"\"\"\n",
    "    data = []\n",
    "    current_time = start\n",
    "    while current_time <= end:\n",
    "        file = f'/root/yjq/data/{datetime.strftime(current_time, \"%Y-%m-%d\")}/trace/{datetime.strftime(current_time, \"%H_%M\")}_trace.csv'\n",
    "        current_time = current_time + timedelta(minutes=1)\n",
    "        # Minutes without a capture file are skipped silently.\n",
    "        if os.path.exists(file):\n",
    "            data.append(pd.read_csv(file))\n",
    "    # Fail with a clear message instead of pd.concat's generic ValueError.\n",
    "    if not data:\n",
    "        raise ValueError(f'no trace files found between {start} and {end}')\n",
    "    total_data = pd.concat(data)\n",
    "    trace_byid_dict = {}\n",
    "    trace_ids = []\n",
    "    for trace_id, spans in total_data.groupby('TraceID', sort=False):\n",
    "        trace_byid_dict[trace_id] = spans\n",
    "        trace_ids.append(trace_id)\n",
    "    print(f'训练数据共有：{len(trace_ids)}条trace')\n",
    "    call_dict = {}\n",
    "    for trace_id in tqdm(trace_ids):\n",
    "        call_dict = process_single_trace(trace_byid_dict, trace_id, call_dict)\n",
    "    # Summarize each call chain as [mean, std] of its duration sums.\n",
    "    # (A stray no-op expression line in the original was removed here.)\n",
    "    call_path_index_dict = {}\n",
    "    for call_path, response_arr in call_dict.items():\n",
    "        arr = np.array(response_arr)\n",
    "        call_path_index_dict[call_path] = [float(arr.mean()), float(arr.std())]\n",
    "    call_path_index = list(call_path_index_dict.keys())\n",
    "    return call_path_index, call_path_index_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def transform_trace_to_stv(call_path_index, trace_byid_dict, trace_id):\n",
    "    \"\"\"Encode one trace as an STV indexed by call_path_index.\n",
    "\n",
    "    Each position holds the trace's mean duration for that call chain\n",
    "    (0 when absent). Returns None when the trace has no root span.\n",
    "    \"\"\"\n",
    "    trace_call_dict_avg = get_trace_calls(trace_byid_dict, trace_id)\n",
    "    if trace_call_dict_avg is None:\n",
    "        return None\n",
    "    # One dict lookup per chain instead of list.index(), which was O(n*m).\n",
    "    # (The original's `index != -1` check was dead: .index raises, it never\n",
    "    # returns -1.)\n",
    "    index_of = {call_path: i for i, call_path in enumerate(call_path_index)}\n",
    "    stv = [0] * len(call_path_index)\n",
    "    for trace_call, response in trace_call_dict_avg.items():\n",
    "        index = index_of.get(trace_call)\n",
    "        if index is not None:\n",
    "            stv[index] = response\n",
    "    return stv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_file_to_stv(current_time, call_path_index, output_file):\n",
    "    \"\"\"Append one 'trace_id:v1,v2,...' line per trace of a minute's CSV.\n",
    "\n",
    "    Reads the minute's trace CSV, converts each trace to an STV over\n",
    "    call_path_index, and appends the formatted rows to output_file.\n",
    "    Traces without a root span are skipped.\n",
    "    \"\"\"\n",
    "    file = f'/root/yjq/data/{datetime.strftime(current_time, \"%Y-%m-%d\")}/trace/{datetime.strftime(current_time, \"%H_%M\")}_trace.csv'\n",
    "    total_data = pd.read_csv(file)\n",
    "    trace_byid_dict = {}\n",
    "    trace_ids = []\n",
    "    for trace_id, spans in total_data.groupby('TraceID', sort=False):\n",
    "        trace_byid_dict[trace_id] = spans\n",
    "        trace_ids.append(trace_id)\n",
    "    print(f'stv数据共有：{len(trace_ids)}条trace')\n",
    "    # Open once in append mode instead of reopening the file per trace.\n",
    "    with open(output_file, 'a') as out:\n",
    "        for trace_id in tqdm(trace_ids):\n",
    "            stv = transform_trace_to_stv(call_path_index, trace_byid_dict, trace_id)\n",
    "            if stv is None:\n",
    "                continue\n",
    "            suffix = ','.join(map(str, stv))\n",
    "            # One line per trace: \"<trace_id>:<v1>,<v2>,...\".\n",
    "            out.write(f'{trace_id}:{suffix}\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 确定一段正常时间，获得train数据和call_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# start = datetime(2024, 10, 3, 16, 22, 0)\n",
    "# end = datetime(2024, 10, 3, 16, 51, 0)\n",
    "# # end = datetime(2024, 10, 3, 16, 22, 0)\n",
    "\n",
    "# call_path_index, call_path_index_dict = get_train_call_dict(start, end)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# import pickle\n",
    "# with open(\"idx.pkl\", \"wb\") as f:\n",
    "#     pickle.dump(call_path_index_dict,f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import here so this cell works on a fresh kernel: the cell above that\n",
    "# originally imported pickle is commented out.\n",
    "import pickle\n",
    "\n",
    "with open(\"idx.pkl\", \"rb\") as f:\n",
    "    call_path_index_dict = pickle.load(f)\n",
    "call_path_index = list(call_path_index_dict.keys())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# start = datetime(2024, 10, 3, 16, 22, 0)\n",
    "# end = datetime(2024, 10, 3, 16, 40, 0)\n",
    "# current_time = start\n",
    "# output_file = \"train\"\n",
    "# while current_time <= end:\n",
    "#     process_file_to_stv(current_time, call_path_index, output_file)\n",
    "#     current_time = current_time + timedelta(minutes=1)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 测试数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output_file = \"test_normal\"\n",
    "# current_time = datetime(2024, 10, 3, 16, 25, 0)\n",
    "# process_file_to_stv(current_time, call_path_index, output_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output_file = \"test_abnormal\"\n",
    "# current_time = datetime(2024,10,5,2,4,0)\n",
    "# process_file_to_stv(current_time, call_path_index, output_file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 根因定位"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the training call-chain index ([mean, std] per chain) from disk.\n",
    "import pickle\n",
    "with open(\"idx.pkl\", \"rb\") as f:\n",
    "    call_path_index_dict=pickle.load(f)\n",
    "call_path_index = list(call_path_index_dict.keys())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_trace_rca(trace_byid_dict, trace_id, call_path_index_dict):\n",
    "    \"\"\"Localize the root-cause pod for one abnormal trace.\n",
    "\n",
    "    Flags every DFS path whose duration sum exceeds mean + 3*std of the\n",
    "    same chain in the training index and returns the pod name at the end\n",
    "    of the deepest such path ('' if none, None if the trace has no root).\n",
    "    \"\"\"\n",
    "    trace_demo = trace_byid_dict[trace_id]\n",
    "    # Reverse the rows so spans are visited bottom-up (leaf spans first).\n",
    "    traces = trace_demo.iloc[::-1].reset_index(drop=True)\n",
    "    nodes = {}\n",
    "    root_node = None\n",
    "\n",
    "    # One SpanNode per span; sort=False keeps the bottom-up row order.\n",
    "    for span_id, span in traces.groupby('SpanID', sort=False):\n",
    "        pod_name = span['PodName'].values[0]\n",
    "        operation_name = span['OperationName'].values[0]\n",
    "        duration = span['Duration'].values[0]\n",
    "        nodes[span_id] = SpanNode(span_id=span_id, pod_name=pod_name, operation_name=operation_name, duration=duration)\n",
    "        if span['ParentID'].values[0] == 'root':\n",
    "            root_node = nodes[span_id]\n",
    "\n",
    "    # Link spans to parents in one pass (replaces the original O(n^2) scan);\n",
    "    # the sorted groupby preserves the original child-attachment order.\n",
    "    for span_id_child, span_child in trace_demo.groupby('SpanID'):\n",
    "        parent_id = span_child['ParentID'].values[0]\n",
    "        if parent_id in nodes:\n",
    "            nodes[parent_id].add_child(nodes[span_id_child])\n",
    "\n",
    "    if root_node is None:\n",
    "        return None\n",
    "\n",
    "    longest_len = 0\n",
    "    rca = \"\"\n",
    "    for path, duration in root_node.dfs_paths_with_durations():\n",
    "        call = '#'.join(node.get_span_name() for node in path)\n",
    "        # Chains unseen during training have no baseline; skip them instead\n",
    "        # of raising KeyError as the original did on novel chains.\n",
    "        baseline = call_path_index_dict.get(call)\n",
    "        if baseline is None:\n",
    "            continue\n",
    "        mean, std = baseline\n",
    "        # 3-sigma rule: the path is anomalous if it exceeds mean + 3*std.\n",
    "        if duration > mean + 3 * std:\n",
    "            # Prefer the deepest anomalous path; its leaf pod is the suspect.\n",
    "            if len(path) > longest_len:\n",
    "                longest_len = len(path)\n",
    "                rca = path[-1].pod_name\n",
    "    return rca\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from detect import *\n",
    "# current_time = datetime(2024,10,5,4,42,0)\n",
    "# with open(\"/root/yjq/TraceAnomaly/slo.pkl\", \"rb\") as f:\n",
    "#     operation_slo,_ = pickle.load(f)\n",
    "# span_list_suffering = get_span_list(detect_time=current_time)\n",
    "# service_operation_list = get_service_operation_list(span_list_suffering)\n",
    "# operation_dict = get_operation_duration_data(\n",
    "#     service_operation_list, span_list_suffering\n",
    "# )\n",
    "# abnormal_trace_list, normal_trace_list = trace_list_partition(\n",
    "#     operation_dict, operation_slo\n",
    "# )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def traceanomaly_rca(current_time, call_path_index_dict, abnormal_trace_list):\n",
    "    \"\"\"Rank suspect pods by vote count over one minute's abnormal traces.\n",
    "\n",
    "    Runs get_trace_rca on every abnormal trace found in the minute's CSV\n",
    "    and returns [(pod_name, votes)] sorted by descending vote count.\n",
    "    \"\"\"\n",
    "    file = f'/root/yjq/data/{datetime.strftime(current_time, \"%Y-%m-%d\")}/trace/{datetime.strftime(current_time, \"%H_%M\")}_trace.csv'\n",
    "    total_data = pd.read_csv(file)\n",
    "    trace_byid_dict = {}\n",
    "    trace_ids = []\n",
    "    for trace_id, spans in total_data.groupby('TraceID', sort=False):\n",
    "        trace_byid_dict[trace_id] = spans\n",
    "        trace_ids.append(trace_id)\n",
    "    # Set membership is O(1); the original list lookup was O(m) per trace.\n",
    "    abnormal_set = set(abnormal_trace_list)\n",
    "    rca_result = {}\n",
    "    for trace_id in trace_ids:\n",
    "        if trace_id in abnormal_set:\n",
    "            rca = get_trace_rca(trace_byid_dict, trace_id, call_path_index_dict)\n",
    "            # NOTE(review): rca may be None (no root span) or '' (no\n",
    "            # anomalous path); both are counted, matching the original.\n",
    "            rca_result[rca] = rca_result.get(rca, 0) + 1\n",
    "    return sorted(rca_result.items(), key=lambda item: -item[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from traceanomaly.main import online_detect\n",
    "# import warnings\n",
    "# from datetime import datetime\n",
    "# import os\n",
    "# warnings.filterwarnings(\"ignore\")\n",
    "# detect_time = datetime(2024,10,5,4,51,0)\n",
    "# detect_file = datetime.strftime(detect_time, \"/root/yjq/TraceAnomaly/ob/%Y-%m-%d/%H_%M\")\n",
    "# print(detect_file)\n",
    "# # process_file_to_stv(detect_time, call_path_index, detect_file)\n",
    "# train_file = \"/root/yjq/TraceAnomaly/ob/train\"\n",
    "# output_file = \"/root/yjq/TraceAnomaly/ob/output.csv\"\n",
    "# # output_file = \"ouput.csv\"\n",
    "# # online_detect(train_file = train_file, detect_file = detect_file)\n",
    "# command = f\"python -m traceanomaly.main --trainfile {train_file} --detectfile {detect_file} --outputfile {output_file}\"\n",
    "# print(command)\n",
    "# os.popen(command).read()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# df = pd.read_csv(output_file)\n",
    "# abnormal_list = list(df[(abs(df['score']) > 2)][\"id\"])\n",
    "# print(f\"abnormal num: {len(abnormal_list)}\")\n",
    "# traceanomaly_rca(detect_time, call_path_index_dict, abnormal_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "58"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import json\n",
    "from detect import *\n",
    "inject_jsons = [\n",
    "    \"/root/yjq/TraceAnomaly/ob/2024-10-05/2024-10-05-fault_list.json\"\n",
    "]\n",
    "# Open and read the fault-injection JSON files.\n",
    "file_arr = []\n",
    "for json_file in inject_jsons:\n",
    "    with open(json_file, 'r') as file:\n",
    "        file_arr.append(json.load(file))\n",
    "\n",
    "# Map injection time (minute precision) -> [pod, fault type].\n",
    "# NOTE(review): [:-3] presumably strips a trailing ':SS' seconds field —\n",
    "# confirm the timestamp format used in the fault list.\n",
    "inject_faults = {}\n",
    "for single_file in file_arr:\n",
    "    for hour, faults in single_file.items():\n",
    "        for fault in faults:\n",
    "            inject_faults[fault[\"inject_time\"][:-3]] = [fault[\"inject_pod\"], fault[\"inject_type\"]]\n",
    "len(inject_faults)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.4482758620689655"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): appears to be a manually computed hit ratio (26 of the\n",
    "# 58 injected faults) — confirm where 26 comes from.\n",
    "26/58"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2024-10-05 01:33:00: ['recommendationservice-5b5b8c4977-2wsfh', 'deploy']\n",
      "python -m traceanomaly.main --trainfile /root/yjq/TraceAnomaly/ob/train --detectfile /root/yjq/TraceAnomaly/ob/2024-10-05/01_35 --outputfile /root/yjq/TraceAnomaly/ob/2024-10-05/output/01_35.csv\n",
      "abnormal num: 137\n",
      "[('productcatalogservice-69789c4db6-2lw6k', 68), ('productcatalogservice-69789c4db6-5nbgc', 66), ('shippingservice-6db677b7c6-mnz87', 2), ('shippingservice-6db677b7c6-h8wqw', 1)]\n"
     ]
    }
   ],
   "source": [
    "# Evaluate RCA accuracy (top-1/3/5) over every injected fault.\n",
    "original_data_path = \"/root/yjq/data/\"\n",
    "output_path = \"/root/yjq/TraceAnomaly/ob/\"\n",
    "top1 = 0\n",
    "top3 = 0\n",
    "top5 = 0\n",
    "for inject_time, [inject_pod, inject_type] in inject_faults.items():\n",
    "    # Analyze two minutes after injection so the fault has taken effect.\n",
    "    rca_time = datetime.strptime(inject_time, \"%Y-%m-%d %H:%M\")+timedelta(minutes=2)\n",
    "    print(\"**************************************************************************\")\n",
    "    print(f'{datetime.strptime(inject_time, \"%Y-%m-%d %H:%M\")}: {[inject_pod, inject_type]}')\n",
    "    span_list_current = get_span_list(detect_time=rca_time)\n",
    "    # Slide forward up to ~6 minutes until a minute with spans is found.\n",
    "    i = 0\n",
    "    while len(span_list_current) == 0 :\n",
    "        rca_time = rca_time + timedelta(minutes=1)\n",
    "        span_list_current = get_span_list(detect_time=rca_time)\n",
    "        i +=1 \n",
    "        if i > 5:\n",
    "            break\n",
    "    # Build the STV file for this minute unless it is already on disk.\n",
    "    output_rca = output_path  + datetime.strftime(rca_time, \"%Y-%m-%d/%H_%M\") \n",
    "    if not os.path.exists(output_rca):\n",
    "        process_file_to_stv(rca_time, call_path_index, output_rca)\n",
    "    detect_time = rca_time\n",
    "    train_file = \"/root/yjq/TraceAnomaly/ob/train\"\n",
    "    output_file = datetime.strftime(detect_time, \"/root/yjq/TraceAnomaly/ob/%Y-%m-%d/output/%H_%M.csv\")\n",
    "    detect_file = datetime.strftime(detect_time, \"/root/yjq/TraceAnomaly/ob/%Y-%m-%d/%H_%M\")\n",
    "    # Run the TraceAnomaly detector as a subprocess; .read() waits for it.\n",
    "    command = f\"python -m traceanomaly.main --trainfile {train_file} --detectfile {detect_file} --outputfile {output_file}\"\n",
    "    os.popen(command).read()\n",
    "    df = pd.read_csv(output_file)\n",
    "    # Traces with |score| > 2 are treated as anomalous.\n",
    "    abnormal_list = list(df[(abs(df['score']) > 2)][\"id\"])\n",
    "    print(f\"abnormal num: {len(abnormal_list)}\")\n",
    "    result = traceanomaly_rca(detect_time, call_path_index_dict, abnormal_list)\n",
    "    print(result)\n",
    "    re_list = []\n",
    "    for i in range(len(result)):\n",
    "        re_list.append(result[i][0])\n",
    "    # FIXME(review): re_list[0] raises IndexError when no candidate was\n",
    "    # found, and top-1 uses substring matching while top-3/top-5 use exact\n",
    "    # list membership — confirm this asymmetry is intended.\n",
    "    if inject_pod in re_list[0]:\n",
    "        top1 += 1\n",
    "    if inject_pod in re_list[:3]:\n",
    "        top3 += 1\n",
    "    if inject_pod in re_list[:5]:\n",
    "        top5 += 1\n",
    "    # FIXME(review): this break stops after the first fault, so the ratios\n",
    "    # printed later cover a single case — likely a debugging leftover.\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "top1: 0.0\n",
      "top3: 0.0\n",
      "top5: 0.0\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): these denominators assume the loop above processed every\n",
    "# fault, but it currently breaks after the first one — the printed ratios\n",
    "# are misleading until that is fixed.\n",
    "print(f\"top1: {top1/len(inject_faults)}\")\n",
    "print(f\"top3: {top3/len(inject_faults)}\")\n",
    "print(f\"top5: {top5/len(inject_faults)}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "trace-anomaly",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
