#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import os
import threading
import time
from urllib.parse import urlparse

import requests
from fastapi import FastAPI
from fastapi.responses import HTMLResponse

# FastAPI application instance; serves the check report at "/" (see index below).
app = FastAPI()


class File:
    """Small static helpers for file existence checks and (JSON) file I/O.

    All text is read/written as UTF-8 bytes; binary modes are used so the
    on-disk bytes are exactly what was encoded (no newline translation).
    """

    @staticmethod
    def exists(filename):
        """Return True if *filename* exists on disk."""
        return os.path.exists(filename)

    @staticmethod
    def getFileContentAsJSON(filename):
        """Read *filename* and parse its contents as JSON."""
        with open(filename, "rb") as f:
            # json.load parses straight from the file object (no manual read()).
            return json.load(f)

    @staticmethod
    def saveToFile(src, filename):
        """Write string *src* to *filename* as UTF-8."""
        with open(filename, "wb") as f:
            f.write(src.encode("utf-8"))

    @staticmethod
    def saveJSONToFile(srcJSON, filename):
        """Serialize *srcJSON* as pretty-printed UTF-8 JSON into *filename*.

        ensure_ascii=False keeps non-ASCII characters readable on disk.
        """
        with open(filename, "wb") as f:
            f.write(json.dumps(srcJSON, indent=4, ensure_ascii=False).encode("utf-8"))

    @staticmethod
    def getFileContent(filename):
        """Return the raw bytes of *filename*."""
        with open(filename, "rb") as f:
            return f.read()


# Newline-separated list of full URLs to probe.
raw = File.getFileContent("urls.txt").decode("utf-8").split("\n")

# Build the path -> interface-info mapping from the yapi export.
yapiInfo = File.getFileContentAsJSON("api.json")
path2infos = {}
for group in yapiInfo:
    groupName = group["name"]
    for interface in group["list"]:
        interfacePath = interface["path"]
        path2infos[interfacePath] = {
            "name": groupName,
            "title": interface["title"],
            "path": interfacePath,
        }


def getInfoFromFullURL(fullURL):
    """Look up yapi interface info for a full URL by its path component.

    Unknown paths yield a stub record whose name/title are None.
    """
    path = urlparse(fullURL).path
    unknown = {
        "name": None,
        "title": None,
        "path": path,
    }
    return path2infos.get(path, unknown)


# def flatRaw(raw):
#     ret = []
#     for k1 in raw:
#         for k2 in raw[k1]:
#             ret.append(raw[k1][k2]["onlyPathFalse"])
#     return ret


def get(url, params=None, headers=None, timeout=None):
    """Thin wrapper around requests.get.

    :param url: full URL to fetch
    :param params: optional query parameters, passed through unchanged
    :param headers: optional request headers
    :param timeout: optional timeout in seconds; default None preserves the
        original no-timeout behavior.  NOTE(review): consider a finite default
        so a hung endpoint cannot block a checker thread forever.
    :return: the requests.Response
    """
    # The original `dp = params if params else None` dance was a no-op.
    return requests.get(url, params=params, headers=headers, timeout=timeout)


class CheckResult:
    """Outcome of checking one URL: HTTP status, business code, timing, errors."""

    def __init__(self, url=None, statusCode=None, businessCode=None, durationInMS=None, exceptionStr=None, msg="略",
                 detail="略", traceID=None):
        self.url = url                      # full URL that was checked
        self.statusCode = statusCode        # HTTP status code (None if request failed)
        self.businessCode = businessCode    # "code" field from the JSON body
        self.durationInMS = durationInMS    # wall-clock request duration, milliseconds
        self.exceptionStr = exceptionStr    # str(exception) when the request raised
        self.msg = msg                      # "msg" field from the body, when code != 0
        self.detail = detail                # "detail" field from the body, when code != 0
        self.traceID = traceID              # trace ID extracted from response headers

    def isFail(self):
        """True on exception, non-200 status, or business code outside (0, 2).

        Business code 2 is deliberately not treated as a failure (see isWarn).
        """
        return self.exceptionStr is not None or self.statusCode != 200 or self.businessCode not in (0, 2)

    def isWarn(self):
        """True for a clean 200 response whose business code is exactly 2.

        A warning rather than an error, to ease diagnosing data problems
        (notably on the stock-data side) without raising alarms.
        """
        return self.exceptionStr is None and self.statusCode == 200 and self.businessCode == 2


def getOrDefault(m, k, d):
    """Return m[k] when key *k* is present in mapping *m*, else default *d*."""
    return m[k] if k in m else d


def tryGetTraceID(headers):
    """Extract the trace ID (first colon-separated segment) from the
    Uber-Trace-Id response header.

    Header format example:
        Uber-Trace-Id: 653d058b815f4706:653d058b815f4706:0000000000000000:1

    Returns the fixed string "获取失败" when the header is absent.
    """
    try:
        return headers.get("Uber-Trace-Id").split(":")[0]
    except AttributeError:
        # headers.get(...) returned None (header missing), so .split raised.
        # Narrowed from the original bare `except:` to avoid hiding real bugs.
        return "获取失败"


def checkOne(fullURL):
    """GET *fullURL* once and summarize the outcome as a CheckResult.

    Records wall-clock duration, HTTP status, the business "code" from the
    JSON body, and msg/detail when code != 0.  Transport errors never
    propagate: they are captured into exceptionStr instead.
    """
    ret = CheckResult()
    ret.url = fullURL
    exception = None

    start = time.time_ns()
    try:
        resp = get(url=fullURL)
    except Exception as e:
        exception = e
    end = time.time_ns()
    ret.durationInMS = (end - start) / 1e6

    if exception is not None:
        ret.exceptionStr = str(exception)
        return ret

    ret.traceID = tryGetTraceID(resp.headers)

    ret.statusCode = resp.status_code
    if resp.status_code != 200:
        return ret

    # Parse the body once instead of calling resp.json() per field.
    body = resp.json()
    ret.businessCode = getOrDefault(body, "code", "字段不存在")
    if ret.businessCode != 0:
        ret.msg = getOrDefault(body, "msg", "字段不存在")
        ret.detail = getOrDefault(body, "detail", "字段不存在")

    return ret


def sortFun(a: CheckResult):
    """Sort key: descending by duration (slowest results first)."""
    duration = a.durationInMS
    return -duration


import pandas as pd


def convertToHtml(result, title):
    # 将数据转换为html的table
    # result是list[list1,list2]这样的结构
    # title是list结构；和result一一对应。titleList[0]对应resultList[0]这样的一条数据对应html表格中的一列
    d = {}
    index = 0
    for t in title:
        d[t] = result[index]
        index = index + 1
    df = pd.DataFrame(d)
    df = df[title]
    h = df.to_html(index=True)
    return h


def trMat(raw):
    """Transpose a rectangular matrix (list of rows -> list of columns).

    Uses zip(*raw) instead of the hand-rolled double loop; unlike the
    original, an empty input returns [] rather than raising IndexError.
    """
    return [list(col) for col in zip(*raw)]


def toTr(x: CheckResult):
    """Flatten one CheckResult (plus its yapi interface info) into a table row."""
    info = getInfoFromFullURL(x.url)
    return [
        info["name"],
        info["title"],
        x.url,
        x.statusCode,
        x.businessCode,
        x.msg,
        x.detail,
        x.durationInMS,
        x.exceptionStr,
        x.traceID,
    ]


def checkResults2html(lst):
    """Render a list of CheckResult objects as an HTML table ("无" when empty).

    Example of the underlying convertToHtml contract:
        result = [[u'2016-08-25', u'2016-08-26'], [u'张三', u'李四']]
        title = [u'日期', u'姓名']
    """
    if not lst:
        return "无"

    title = ["name", "title", "URL", "状态码", "code", "msg", "detail", "耗时[ms]", "异常", "traceID"]
    rows = [toTr(item) for item in lst]
    return convertToHtml(trMat(rows), title)


class Cache:
    """Single-value, lock-guarded cache refreshed by the *lmd* callable.

    lmd receives this Cache instance and returns the value to store.
    lastExec and expiredAt are timestamps in time.time_ns() units.
    """

    def __init__(self, lmd=None, lastExec=None, expiredAt=None):
        self.lmd = lmd              # loader: Cache -> value
        self.lastExec = lastExec    # ns timestamp of the last refresh
        self.expiredAt = expiredAt  # ns timestamp after which ret is stale
        self.ret = None             # cached value (None means "not loaded yet")
        self.lock = threading.Lock()

    def isExpired(self):
        """True when no expiry has been set yet, or the deadline has passed."""
        if self.expiredAt is None:
            return True
        return time.time_ns() > self.expiredAt

    def getOrLoad(self):
        """Return the cached value, refreshing via lmd when missing or expired.

        Uses `with self.lock` so the lock is released even if lmd raises;
        the original acquire()/release() pair leaked the lock on exceptions.
        """
        with self.lock:
            if self.ret is None or self.isExpired():
                self.lastExec = time.time_ns()
                self.ret = self.lmd(self)
                # Entries expire one minute after the refresh.
                self.expiredAt = self.lastExec + 60 * 1e9
            return self.ret


def toTimeStr(ct):
    """Format a float seconds-since-epoch timestamp as
    "YYYY-mm-dd HH:MM:SS.mmm" in local time.

    :param ct: timestamp in seconds (float)
    :return: formatted local-time string with millisecond precision
    """
    whole = int(ct)
    millis = round((ct - whole) * 1000)
    # round() can yield 1000 (e.g. ct = 1.9999): the original then produced
    # "...:01.1000".  Carry the overflow into the seconds part instead.
    if millis >= 1000:
        whole += 1
        millis -= 1000
    head = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(whole))
    return "%s.%03d" % (head, millis)


from multiprocessing.dummy import Pool as ThreadPool  # 线程池


def getCheckResults(urls):
    # Probe all URLs concurrently; the work is I/O-bound so threads overlap well.
    pool = ThreadPool(10)  # thread pool with 10 worker threads
    ret = pool.map(checkOne, urls)  # one CheckResult per URL, input order preserved
    pool.close()  # no more tasks may be submitted
    pool.join()  # block until all workers have finished
    return ret


def getHTML(cache: Cache):
    """Check every configured URL and render the results as an HTML report.

    Results are split into failure / warning / success sections, each sorted
    slowest-first.  *cache* supplies the last-execution timestamp.
    """
    urls = [u for u in raw if len(u) > 0]
    results = getCheckResults(urls)

    fails, warns, succes = [], [], []
    for res in results:
        if res.isFail():
            bucket = fails
        elif res.isWarn():
            bucket = warns
        else:
            bucket = succes
        bucket.append(res)

    # Slowest entries first within every section.
    for bucket in (fails, succes, warns):
        bucket.sort(key=sortFun)

    lastExec = "无" if cache.lastExec is None else toTimeStr(cache.lastExec / 1e9)

    template = ("上次执行时刻:{}（1分钟过期）"
                "<br><br>失败:{}"
                "<br><br>warn:{}"
                "<br><br>成功:{}")
    return template.format(lastExec,
                           checkResults2html(fails),
                           checkResults2html(warns),
                           checkResults2html(succes))


# Module-level cache shared by all requests; getHTML is the loader and the
# rendered report expires one minute after each refresh (see Cache.getOrLoad).
cache = Cache(lmd=getHTML)


@app.get('/')
async def index():
    """Serve the (cached) HTML check report at the site root."""
    htmlContent = cache.getOrLoad()
    return HTMLResponse(content=htmlContent, status_code=200)


if __name__ == '__main__':
    # NOTE(review): the previous `if False: uvicorn.run(...)` branch was
    # unreachable dead code and referenced `uvicorn` without importing it.
    # To serve the report over HTTP instead, run e.g.:
    #   uvicorn <module>:app --host 127.0.0.1 --port 20216
    # and browse to http://127.0.0.1:20216

    # One-shot mode: render the report once and write it to disk.
    htmlFileName = "output/summary.html"
    File.saveToFile(cache.getOrLoad(), htmlFileName)

    # Open the generated report in the default viewer.
    # ("open" is macOS-specific — TODO confirm the target platform.)
    os.system("open {}".format(htmlFileName))
