{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import importlib\n",
    "import os\n",
    "import sys\n",
    "import re\n",
    "import json\n",
    "import matplotlib.pyplot as plt\n",
    "import data_path  # 路径\n",
    "import data_cleaning\n",
    "from read_data import readCSV, read_xlrd, readCsvWithPandas\n",
    "from datetime import datetime\n",
    "from xlrd import xldate_as_tuple\n",
    "from show_Kpis import getKpis\n",
    "import anomaly_detection\n",
    "import network\n",
    "import resultForm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cache of already-parsed KPI files, keyed by metric file name (filled lazily).\n",
    "kpi_opened = {}\n",
    "left_n = 10  # how many top-scoring results to keep\n",
    "# Whether each call type is invoked on the executor side\n",
    "isExecutor = {\"JDBC\": False, \"LOCAL\": False, \"CSF\": False,\n",
    "              \"FlyRemote\": True, \"OSB\": True, \"RemoteProcess\": True}\n",
    "# Which days' data folders to analyse\n",
    "days = ['2020_04_24','2020_04_25']\n",
    "# days=['2020_04_21']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_abnormal_indicators(execption_Interval, cmdb_id, paths):\n",
    "    \"\"\"Score every KPI of one network element inside an anomaly interval.\n",
    "\n",
    "    Args:\n",
    "        execption_Interval (tuple): (start_time, end_time) of the anomaly.\n",
    "        cmdb_id (str): network-element id, e.g. 'docker_001'.\n",
    "        paths (list): platform-metric directories to read, [path1, path2].\n",
    "\n",
    "    Returns:\n",
    "        list: one [cmdb_id, kpi_name, bomc_id, score] entry per matching KPI;\n",
    "        the caller sorts these and keeps the best-scoring ones.\n",
    "    \"\"\"\n",
    "    kpis = {}\n",
    "    abnormal_indicators = []\n",
    "    # Map the cmdb prefix (os / docker / db / ...) to its metric file name.\n",
    "    file_name = data_path.fileNames[cmdb_id.split('_')[0]]\n",
    "    # Parse each KPI file only once; cache the parsed result per file name.\n",
    "    if file_name not in kpi_opened:\n",
    "        for p in paths:\n",
    "            kpis = getKpis([file_name], p, kpis)\n",
    "        kpi_opened[file_name] = kpis\n",
    "    else:\n",
    "        kpis = kpi_opened[file_name]\n",
    "    # Score every KPI series that belongs to this network element.\n",
    "    for k, v in kpis.items():\n",
    "        temp = k.split(',')  # key layout: (cmdb_id, name, bomc_id, itemid)\n",
    "        if cmdb_id == temp[0]:\n",
    "            score = anomaly_detection_func(execption_Interval, np.array(v))\n",
    "            abnormal_indicators.append([temp[0], temp[1], temp[2], score])\n",
    "    return abnormal_indicators"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def anomaly_detection_func(execption_Interval, data):\n",
    "    \"\"\"Count anomalous samples of one KPI series inside a time interval.\n",
    "\n",
    "    Args:\n",
    "        execption_Interval (tuple): (start_time, end_time) timestamps.\n",
    "        data (np.ndarray): rows of (itemid, name, bomc_id, timestamp, value, cmdb_id).\n",
    "\n",
    "    Returns:\n",
    "        int: number of points strictly inside the interval flagged as outliers.\n",
    "    \"\"\"\n",
    "    data = pd.DataFrame(data)\n",
    "    data.columns = ['itemid', 'name', 'bomc_id',\n",
    "                    'timestamp', 'value', 'cmdb_id']\n",
    "    # The detector expects the series in time order.\n",
    "    data.sort_values(\"timestamp\", inplace=True)\n",
    "    # Isolation-forest predictions: -1 marks an outlier, 1 an inlier.\n",
    "    pred = anomaly_detection.iforest(data, [\"value\"])\n",
    "    data['pred'] = pred\n",
    "    timestamps = data['timestamp'].values.astype(np.int64)\n",
    "    # Count outliers whose timestamp falls strictly inside the interval.\n",
    "    abnormal_data_total = sum(\n",
    "        1 for timestamp, pred_num in zip(timestamps, pred)\n",
    "        if execption_Interval[0] < timestamp < execption_Interval[1]\n",
    "        and pred_num == -1)\n",
    "\n",
    "    return abnormal_data_total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_abnormal_span(trace):\n",
    "    \"\"\"Walk every span of one trace (as a call graph) and collect faulty nodes.\n",
    "\n",
    "    Args:\n",
    "        trace (dict): one trace, shaped {'startTime': str, 'spans': {spanId: {...}}}.\n",
    "\n",
    "    Returns:\n",
    "        list: cmdb_ids of the spans judged abnormal (possibly empty).\n",
    "    \"\"\"\n",
    "    spans = trace['spans']\n",
    "    graph = data_cleaning.generateGraph(spans)\n",
    "    if graph.get('root') is None:\n",
    "        return []\n",
    "    abnormal_cmdb_ids = []\n",
    "    # Sentinel value returned by traverse() to abort the recursion early.\n",
    "    Break = True\n",
    "\n",
    "    def traverse(root_id, abn_ids, isError=False):\n",
    "        # isError: whether some ancestor span already failed.\n",
    "        root = spans[root_id]\n",
    "        # Suspect this span if an ancestor failed or it failed itself.\n",
    "        if isError or root['success'] == 'False':\n",
    "            # A failing database span wins outright: keep only the db id\n",
    "            # and abort the whole traversal.\n",
    "            if root['db'] and root['success'] == 'False':\n",
    "                abn_ids.clear()\n",
    "                abn_ids.append(root[\"db\"])\n",
    "                return Break\n",
    "            # Executor-side span that succeeded directly below a failure:\n",
    "            # the failure/success boundary points at the culprit element.\n",
    "            if isExecutor[root['callType']] and root['callType'] != 'OSB' \\\n",
    "                    and root['success'] == 'True':\n",
    "                abn_ids.clear()\n",
    "                abn_ids.append(root[\"cmdb_id\"])\n",
    "        isError = root['success'] == 'False'\n",
    "        # Leaf span: nothing more to explore on this branch.\n",
    "        if graph.get(root_id) is None:\n",
    "            return not Break\n",
    "        for span_id in graph[root_id]:\n",
    "            if traverse(span_id, abn_ids, isError) == Break:\n",
    "                return Break\n",
    "        return not Break\n",
    "\n",
    "    for span_id in graph.get('root'):\n",
    "        abn_ids = []\n",
    "        traverse(span_id, abn_ids)\n",
    "        abnormal_cmdb_ids += abn_ids\n",
    "    return abnormal_cmdb_ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_abnormal_trace(execption_Interval, traces):\n",
    "    \"\"\"Collect every trace whose start time lies inside an anomaly interval.\n",
    "\n",
    "    Args:\n",
    "        execption_Interval (tuple): (start_time, end_time) timestamps.\n",
    "        traces (dict): all traces; each value carries a 'startTime' field.\n",
    "\n",
    "    Returns:\n",
    "        list: traces starting strictly inside the interval (bounds excluded).\n",
    "    \"\"\"\n",
    "    abnormal_trace = []\n",
    "    for trace in traces.values():\n",
    "        startTime = int(trace['startTime'])\n",
    "        if execption_Interval[0] < startTime < execption_Interval[1]:\n",
    "            abnormal_trace.append(trace)\n",
    "    return abnormal_trace"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def to_standard_answer(result, fault_ids):\n",
    "    \"\"\"Convert raw per-fault results into the standardized answer format.\n",
    "\n",
    "    Args:\n",
    "        result: per-fault lists of [cmdb_id, indicator, ...] rows.\n",
    "        fault_ids: fault identifiers, aligned element-wise with `result`.\n",
    "\n",
    "    Returns:\n",
    "        dict: fault_id -> [cmdb_prefix, cmdb_id, ...indicator info...].\n",
    "    \"\"\"\n",
    "    answer = {}\n",
    "    for fault_id, a_result in zip(fault_ids, result):\n",
    "        # Skip faults for which nothing was located.\n",
    "        if len(a_result) == 0 or (len(a_result) == 1 and len(a_result[0]) == 0):\n",
    "            continue\n",
    "        first_row = a_result[0]\n",
    "        full_id = first_row[0]                      # e.g. 'docker_001'\n",
    "        entry = [full_id.split(\"_\")[0], full_id]    # ['docker', 'docker_001']\n",
    "        if len(a_result) == 1:\n",
    "            # Single row: keep its remaining fields and pad with [None].\n",
    "            entry.extend(first_row[1:])\n",
    "            entry.append([None])\n",
    "        else:\n",
    "            # Several rows: collect the indicator name (column 1) of each.\n",
    "            entry.append([row[1] for row in a_result])\n",
    "        answer[fault_id] = entry\n",
    "    return answer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_abnormal_interval(days):\n",
    "    \"\"\"Locate anomalous time intervals in the business (ESB) metrics.\n",
    "\n",
    "    Args:\n",
    "        days (list): day strings like '2020_04_24' selecting the data folders.\n",
    "\n",
    "    Returns:\n",
    "        tuple: (interval_times, is_net_error, fault_ids); is_net_error is\n",
    "        currently always empty because the network-fault check is disabled.\n",
    "    \"\"\"\n",
    "    business_paths = [os.path.join(data_path.get_data_path(day), \"业务指标\", \"esb.csv\") for day in days]\n",
    "    # Load every day's business metrics into one frame (pd.concat drops the\n",
    "    # initial None silently); .values below strips the header into np.array.\n",
    "    data = None\n",
    "    for p in business_paths:\n",
    "        data = pd.concat([data,pd.read_csv(p)],ignore_index=True)\n",
    "    data = data.values\n",
    "    # Sort rows by the timestamp column (column 1).\n",
    "    data = data[np.argsort(data[:, 1])]\n",
    "    # todo step1: anomalous time series\n",
    "    # Rows flagged as anomalous by the detector.\n",
    "    abnormal_data = anomaly_detection.find_abnormal_data(data)\n",
    "    # Timestamps of the anomalous rows.\n",
    "    execption_times = abnormal_data[:, 1].astype(np.int64)\n",
    "    #! Anomalous time intervals.\n",
    "    # NOTE(review): bias=5*60*100 — if this is meant to be 5 minutes in\n",
    "    # milliseconds it would be 5*60*1000; confirm the unit.\n",
    "    # interval_times = anomaly_detection.to_interval(execption_times)\n",
    "    interval_times,fault_ids = anomaly_detection.fault_time(bias=5*60*100,file_day=days[0],type=2)\n",
    "    #! Whether each interval is a network fault (check currently disabled).\n",
    "    is_net_error = []# anomaly_detection.is_net_error_func(interval_times,abnormal_data)\n",
    "    print(len(interval_times))\n",
    "    \n",
    "    for i,j in zip(interval_times,is_net_error):\n",
    "        print(i,j)\n",
    "    # Plot the detected anomalous intervals over the metric series.\n",
    "    anomaly_detection.draw_abnormal_period(data, interval_times)\n",
    "\n",
    "    return interval_times,is_net_error,fault_ids"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 开始运行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the per-day platform-metric folder paths and detect the anomalous\n",
    "# intervals; reload the network module so live code edits are picked up.\n",
    "importlib.reload(network)\n",
    "plat_paths = [os.path.join(data_path.get_data_path(day),\"平台指标\") for day in days]\n",
    "interval_times,is_net_error,fault_ids = get_abnormal_interval(days)\n",
    "# print(fault_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# todo step2: load every trace of the selected days into one dict.\n",
    "traces = {}\n",
    "for day in days:\n",
    "    prex_path = data_path.get_data_path(day)\n",
    "    trace_p = os.path.join(prex_path,\"调用链指标\")\n",
    "    data_cleaning.build_trace(trace_p,traces)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "abnormal_cmdb_all = []\n",
    "# One result slot per anomalous interval.\n",
    "result = [ 0 for _ in range(len(interval_times))]\n",
    "#? Process each anomalous time interval independently.\n",
    "for i in range(len(interval_times)):\n",
    "    # Current anomalous interval (start, end).\n",
    "    execption_Interval = interval_times[i]\n",
    "    # Abnormal KPI entries collected for this interval.\n",
    "    abnormal_indicators = []\n",
    "    # todo step3: traces that start inside this interval.\n",
    "    abnormal_traces = find_abnormal_trace(execption_Interval, traces)\n",
    "    # NOTE(review): the network-fault branch below is hard-disabled with\n",
    "    # `if False:`; it presumably should test is_net_error[i], but\n",
    "    # is_net_error is left empty upstream — confirm before re-enabling.\n",
    "    # print(is_net_error[i])\n",
    "    # is_net_error[i] = False\n",
    "    if False:\n",
    "        # Network fault: locate the faulty element from the traces alone.\n",
    "        net_error_cmdb_id = network.locate_net_error(abnormal_traces)\n",
    "        abnormal_cmdb_all.append(net_error_cmdb_id)\n",
    "        abnormal_indicators.append( net_error_cmdb_id )\n",
    "    else :\n",
    "        # Locate candidate cmdb_ids from the abnormal traces.\n",
    "        abnormal_cmdb_ids = list(set(network.locate_net_error(abnormal_traces)))\n",
    "        # todo step4: add the abnormal spans found inside each trace.\n",
    "        for trace in abnormal_traces:\n",
    "            abnormal_cmdb_ids += find_abnormal_span(trace)\n",
    "        # Deduplicate the candidate elements.\n",
    "        abnormal_cmdb_ids = list(set(abnormal_cmdb_ids))\n",
    "        \n",
    "        abnormal_cmdb_all.append(abnormal_cmdb_ids)\n",
    "        # todo step5: decide which KPI of each element is abnormal.\n",
    "        for cmdb_id in abnormal_cmdb_ids:\n",
    "            # ? Score this element's KPIs inside the interval.\n",
    "            abnormal_indicators.extend(find_abnormal_indicators(\n",
    "                execption_Interval, cmdb_id,plat_paths))\n",
    "            print(execption_Interval, cmdb_id)\n",
    "        # Keep only the best-scoring indicators.\n",
    "        abnormal_indicators = sorted(\n",
    "            abnormal_indicators, key=lambda x: x[-1], reverse=True)[:left_n]\n",
    "        # If even the best score is 0, fall back to the raw cmdb_id list.\n",
    "        if len(abnormal_indicators) !=0 and int(abnormal_indicators[0][-1])==0:\n",
    "            abnormal_indicators = [abnormal_cmdb_ids]\n",
    "    result[i] = np.array(abnormal_indicators)\n",
    "\n",
    "\n",
    "for i in abnormal_cmdb_all:\n",
    "    print(i)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(len(result))\n",
    "# Persist a human-readable dump plus the standardized JSON answer.\n",
    "save_path = data_path.result_save_path()\n",
    "# makedirs(exist_ok=True) is race-free and also creates missing parents,\n",
    "# unlike the exists()+mkdir() pattern.\n",
    "os.makedirs(save_path, exist_ok=True)\n",
    "with open(os.path.join(save_path,\"result_\"+days[0]), 'w') as f:\n",
    "    for fault_id, r in zip(fault_ids,result):\n",
    "        f.write(str(fault_id)+\":\\n\")\n",
    "        for o in r:\n",
    "            f.write(str(o)+'\\n')\n",
    "resultForm.resultForm(result,\"result_\"+days[0],fault_ids)\n",
    "\n",
    "answer = to_standard_answer(result,fault_ids)\n",
    "with open(os.path.join(save_path,\"answer_\"+days[0]+\".json\"), 'w') as f:\n",
    "    # Stream the JSON straight to the file instead of dumps()+write().\n",
    "    json.dump(answer, f, indent=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
