# -*- coding: utf-8 -*-
# standard
import os
import sys
import re
from datetime import datetime
from enum import Enum
from typing import Dict
from typing import List
from typing import Optional
from collections import Counter

# third
import matplotlib.pyplot as plt  # pip install matplotlib


# local
_P_PATH =  os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if _P_PATH not in sys.path:
    sys.path.append(_P_PATH)
from models import *


"""
@Title:   
@File: 绘制下载的图.py
@Author: walle 2023年07日05日 18时29分26秒
@Version: 1.0.0
@Desc: 
"""

LOG_DIR = os.path.join(R_P_PATH, "logs")
GLOBAL_COUNTER = Counter()  # cumulative per-title signal counts, shared across calls


def get_row_count(title_value: str) -> int:
    """Increment and return the running occurrence count for *title_value*."""
    GLOBAL_COUNTER.update([title_value])
    return GLOBAL_COUNTER[title_value]


class LogTitle(Enum):
    """Kinds of log lines the parser recognises."""

    DI = "di"                            # DI (barcode trigger) signal arrived
    CAPTURE = "capture"                  # capture signal received
    CODE = "code"                        # barcode arrived from the camera
    BEFORE_VALIDATE = "before_validate"  # all barcodes collected, before validation


class Log(BaseModel):
    """One parsed log event (model base presumably pydantic — from ``models``)."""
    title: LogTitle = Field(...)  # which signal kind this line represents
    event_time: datetime = Field(...)  # timestamp parsed from the log line
    row_count: int = Field(...,)  # running index of the signal, read from the log text
    time_delta: float = Field(0)  # ms spent receiving barcodes (set for CODE lines only)

    class Config:
        # Allow field types the model framework does not natively validate.
        arbitrary_types_allowed = True


# Maps the Chinese message prefix of each log line to its LogTitle.
# Keys are matched with str.startswith against the message part of a line.
TITLE_MAP = {
    "收到长度为": LogTitle.DI,  # "received DI of length ..."
    "收到拍摄信号": LogTitle.CAPTURE,  # "received capture signal"
    "收到相机信号": LogTitle.CODE,  # "received camera signal"
    "待验证条码条码已收齐": LogTitle.BEFORE_VALIDATE,  # "barcodes to validate all collected"
}


# Pre-compiled patterns, hoisted out of the per-line call.
# Raw strings fix the invalid "\d" escape in the original non-raw literals.
_ROW_COUNT_RE = re.compile(r"row_count=(\d+)")
_CAMERA_ID_RE = re.compile(r"camera_id=(\d+)")
_TIME_DELTA_RE = re.compile(r"time_delta=(\S+)")


def parse_log_content(content: str, event_time: datetime) -> Optional[Log]:
    """Parse the message part of one log line into a ``Log`` record.

    :param content: message portion of the log line (after the " | " fields)
    :param event_time: timestamp parsed from the same line
    :return: a ``Log``, or ``None`` when the line is not a known signal
        message or comes from a camera other than camera 1
    """
    for prefix, title in TITLE_MAP.items():
        if not content.startswith(prefix):
            continue
        # `.group(1)` replaces the obscure `.groups(0)[0]` of the original.
        row_count = int(_ROW_COUNT_RE.search(content).group(1))
        time_delta = 0
        if title is LogTitle.CODE:
            camera_id = int(_CAMERA_ID_RE.search(content).group(1))
            # log carries seconds; convert to milliseconds
            time_delta = float(_TIME_DELTA_RE.search(content).group(1)) * 1000
            if camera_id != 1:  # TODO: only camera 1 is kept; adjust for other setups
                return None
        return Log(title=title, event_time=event_time, row_count=row_count, time_delta=time_delta)
    return None



def calculate_result(dat_set: Dict[str, List[Log]]) -> None:
    """
    Compute and log timing statistics over the parsed log records.

    :param dat_set: ``Log`` records bucketed by signal value
        ("di" / "capture" / "code" / "before_validate"), each list in
        arrival order.
    """
    result = {}  # NOTE(review): never used — candidate for removal
    for title_value, data_list in dat_set.items():
        data_list = data_list[3:]  # drop the first few signals to avoid start-up jitter
        if len(data_list) == 0:
            continue
        length = len(data_list)
        # Span between first and last event of this signal type, in ms.
        all_time = (data_list[-1].event_time - data_list[0].event_time).total_seconds() * 1000
        avg_time = round(all_time / length)
        a_str = f"{length}次 {title_value} 信号，平均耗时 {avg_time} 毫秒"
        logger.debug(a_str)
        if title_value == LogTitle.CODE.value:
            # CODE records also carry a per-record receive duration (time_delta, ms).
            avg_time = sum([x.time_delta for x in data_list]) / length
            avg_time = round(avg_time)
            a_str = f"{length}次 接收条码过程，平均耗时 {avg_time} 毫秒"
            logger.debug(a_str)
    # Per-row delays between consecutive signal stages.
    delta_set = {
        "拍摄延时": [],  # DI -> capture
        "条码到达延时": [],  # capture -> first barcode
        "备齐条码延时": [],  # first barcode -> all barcodes collected
    }
    di_set = {x.row_count: x for x in dat_set['di']}
    capture_set = {x.row_count: x for x in dat_set['capture']}
    code_set = {x.row_count: x for x in dat_set['code']}
    before_set = {x.row_count: x for x in dat_set['before_validate']}
    for row_count, di in di_set.items():
        cur_di = di.event_time
        if row_count not in capture_set:
            continue
        cur_capture = capture_set[row_count].event_time
        # NOTE(review): assumes every captured row also exists in code_set;
        # a missing entry raises KeyError — confirm that is acceptable.
        cur_code = code_set[row_count].event_time
        status_before = len(dat_set['before_validate']) > 0
        if status_before:
            cur_before = before_set[row_count].event_time
        d1 = (cur_capture - cur_di).total_seconds() * 1000
        delta_set['拍摄延时'].append(d1)
        d2 = (cur_code - cur_capture).total_seconds() * 1000
        logger.debug(f"第{row_count}次从拍摄信号到相机收到第一个条码之间的延时: {d2} 毫秒")
        delta_set['条码到达延时'].append(d2)
        if status_before:
            d3 = (cur_before - cur_code).total_seconds() * 1000
            delta_set['备齐条码延时'].append(d3)
    for word, data_list in delta_set.items():
        length = len(data_list)
        all_time = sum(data_list)
        # NOTE(review): divides by length - 1 (ZeroDivisionError at length == 1,
        # and the log reports one fewer sample) — confirm this is intentional.
        avg_time = round(all_time / (length - 1))
        a_str = f"{length - 1}次样本统计，平均{word} {avg_time} 毫秒"
        logger.debug(a_str)




def read_file(file_name: str = None) -> None:
    """
    Read one day's log file, parse each recognised line into a ``Log`` and
    feed the buckets to ``calculate_result``.

    :param file_name: log file name inside ``LOG_DIR``; defaults to today's
        ``yy_mm_dd_debug.log``.
    """
    if not file_name:
        file_name = f'{datetime.now().strftime("%y_%m_%d")}_debug.log'
    file_path = os.path.join(LOG_DIR, file_name)
    # One bucket per LogTitle value ("di", "capture", ...).
    data_set = {x.lower(): [] for x in LogTitle.__members__}
    with open(file=file_path, encoding="utf-8") as file:
        for log_str in file:
            try:
                time_str, _level, _line_num, content, *_ = log_str.split(" | ")
            except ValueError as e:
                # Lines without the " | " layout (tracebacks etc.) are skipped;
                # any other ValueError is re-raised, as before.
                if str(e).startswith('not enough values to unpack'):
                    continue
                raise
            one = parse_log_content(content=content.strip(),
                                    event_time=datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S.%f"))
            if one is not None:
                data_set[one.title.value].append(one)
    # Emit the statistics for everything collected above.
    calculate_result(dat_set=data_set)


def parse_file1(file_name: str = None) -> List[int]:
    """
    Extract the capture-signal indexes from a log file and sanity-check them.

    Each matching line carries an index that should advance in lockstep with a
    local counter; a mismatch means a signal was dropped or duplicated.

    :param file_name: log file name inside ``LOG_DIR``; defaults to today's
        ``yy_mm_dd_debug.log``.
    :return: the indexes found, in file order
    """
    if not file_name:
        file_name = f'{datetime.now().strftime("%y_%m_%d")}_debug.log'
    file_path = os.path.join(LOG_DIR, file_name)
    data_set: List[int] = []
    keyword = "当前发送给软件的拍摄信号的索引是"
    pattern = re.compile(r': (\d+)')
    with open(file=file_path, encoding="utf-8") as file:
        sn = 0
        for line in file:
            if keyword not in line:
                continue
            logger.debug(line)
            sn += 1
            num = int(pattern.search(line).group(1))
            data_set.append(num)
            if num != sn:
                # The original had a bare no-op `line` statement here;
                # surface the mismatch instead of silently ignoring it.
                logger.warning(f"index {num} != expected counter {sn}: {line}")
    return data_set


def draw_plot(sn: int = 0):
    """
    Draw per-barcode processing-delay bar charts from ScanRecordOrm rows.

    Groups records by ``row_count`` (expecting 4 barcodes per row), then for
    every 10th row plots the capture->scan delay of each barcode position,
    saving one cumulative image per position next to this script.

    :param sn: unused; kept for interface compatibility
    """
    plt.figure(figsize=(24, 14))  # figure size
    out_dir = os.path.dirname(os.path.realpath(__file__))  # was `dir` (shadowed builtin)
    data = {}
    for rec in db_session.query(ScanRecordOrm).all():
        container = data.setdefault(rec.row_count, [])
        container.append(rec)
        if len(container) == 4:  # all 4 barcodes of the row arrived: order them
            container.sort(key=lambda r: r.scan_time)
    result = {}
    for index in range(4):
        container = {"x": [], "y": []}
        for row_count, codes in data.items():
            if row_count % 10 != 0:  # sample every 10th row to keep the plot readable
                continue
            if index >= len(codes):  # incomplete row (fewer than 4 barcodes): skip
                continue
            code = codes[index]
            delta = (code.scan_time - code.capture_time).total_seconds() * 1000  # capture -> scan, ms
            # delta = (code.validate_time - code.capture_time).total_seconds() * 1000  # capture -> validate, ms
            container['x'].append(row_count)
            container['y'].append(delta)
        result[index] = container
    for index, item in result.items():
        # Bars accumulate on the shared figure, so the image saved for
        # position N contains series 0..N.
        plt.bar(item['x'], item['y'], label=index)
        plt.savefig(os.path.join(out_dir, f"第{index}个条码处理延时图.jpg"))


def draw_delta():
    """
    Plot per-stage latency bar charts, one bar per row_count, four subplots:
    capture->scan, scan->validate, validate->save, capture->save (all ms).

    Observed: receiving barcodes is the main congestion point (roughly 2/3 of
    the time when congested), followed by the database write; validation is
    largely unaffected.
    """
    fig = plt.figure(figsize=(24, 14))  # figure size
    ax1 = fig.add_subplot(2, 2, 1)
    ax2 = fig.add_subplot(2, 2, 2)
    ax3 = fig.add_subplot(2, 2, 3)
    ax4 = fig.add_subplot(2, 2, 4)
    ax1.set_title("色块信号到读码成功的时间差", fontdict={"family": "SimSun"})
    ax2.set_title("读码成功到验证结束的时间差", fontdict={"family": "SimSun"})
    ax3.set_title("验证结束到写入数据库的时间差", fontdict={"family": "SimSun"})
    ax4.set_title("色块信号到写入数据库的时间差", fontdict={"family": "SimSun"})
    out_dir = os.path.dirname(os.path.realpath(__file__))  # was `dir` (shadowed builtin)
    keys, d0_set, d1_set, d2_set, d3_set = [], [], [], [], []
    row_counts = set()
    for x in db_session.query(ScanRecordOrm).all():
        d0 = (x.scan_time - x.capture_time).total_seconds() * 1000   # capture -> scan
        d1 = (x.validate_time - x.scan_time).total_seconds() * 1000  # scan -> validate
        d2 = (x.save_time - x.validate_time).total_seconds() * 1000  # validate -> save
        # BUGFIX: was `save_time - validate_time` (a duplicate of d2), but this
        # subplot is labelled capture -> save.
        d3 = (x.save_time - x.capture_time).total_seconds() * 1000
        if x.row_count not in row_counts:  # keep only the first record of each row
            keys.append(x.row_count)
            d0_set.append(d0)
            d1_set.append(d1)
            d2_set.append(d2)
            d3_set.append(d3)
            row_counts.add(x.row_count)
    for i, k in enumerate(keys):
        logger.debug(f"row_count={k}, value={d0_set[i]}")
    ax1.bar(keys, d0_set)
    ax2.bar(keys, d1_set)
    ax3.bar(keys, d2_set)
    ax4.bar(keys, d3_set)
    fig.savefig(os.path.join(out_dir, "多个时间差.jpg"))


def draw_delta_hist(summary_id: int = 5):
    """
    Scatter-plot the per-stage latencies (same data as ``draw_delta`` but as
    scatter points over the sample index instead of bars over row_count).

    Observed: receiving barcodes is the main congestion point (roughly 2/3 of
    the time when congested), followed by the database write; validation is
    largely unaffected.

    :param summary_id: restrict to one task; falsy means all records
    """
    fig = plt.figure(figsize=(24, 14))  # figure size
    ax1 = fig.add_subplot(2, 2, 1)
    ax2 = fig.add_subplot(2, 2, 2)
    ax3 = fig.add_subplot(2, 2, 3)
    ax4 = fig.add_subplot(2, 2, 4)
    ax1.set_title("色块信号到读码成功的时间差", fontdict={"family": "SimSun"})
    ax2.set_title("读码成功到验证结束的时间差", fontdict={"family": "SimSun"})
    ax3.set_title("验证结束到写入数据库的时间差", fontdict={"family": "SimSun"})
    ax4.set_title("色块信号到写入数据库的时间差", fontdict={"family": "SimSun"})
    out_dir = os.path.dirname(os.path.realpath(__file__))  # was `dir` (shadowed builtin)
    keys, d0_set, d1_set, d2_set, d3_set = [], [], [], [], []
    row_counts = set()
    counters = []  # x axis: 1..n sample index
    counter = 0
    threshold = 100000  # ms; samples above this are treated as outliers and dropped
    if summary_id:
        query = db_session.query(ScanRecordOrm).filter(ScanRecordOrm.summary_id == summary_id).all()
    else:
        query = db_session.query(ScanRecordOrm).all()
    for x in query:
        d0 = (x.scan_time - x.capture_time).total_seconds() * 1000   # capture -> scan
        d1 = (x.validate_time - x.scan_time).total_seconds() * 1000  # scan -> validate
        d2 = (x.save_time - x.validate_time).total_seconds() * 1000  # validate -> save
        # BUGFIX: was `save_time - validate_time` (a duplicate of d2), but this
        # subplot is labelled capture -> save.
        d3 = (x.save_time - x.capture_time).total_seconds() * 1000
        # One point per row, outliers removed.
        if x.row_count not in row_counts and d0 < threshold and d1 < threshold and d2 < threshold and d3 < threshold:
            counter += 1
            counters.append(counter)
            keys.append(x.row_count)
            d0_set.append(d0)
            d1_set.append(d1)
            d2_set.append(d2)
            d3_set.append(d3)
            row_counts.add(x.row_count)
    for i, k in enumerate(keys):
        logger.debug(f"row_count={k}, value={d0_set[i]}")
    ax1.scatter(counters, d0_set, s=1)
    ax2.scatter(counters, d1_set, s=1)
    ax3.scatter(counters, d2_set, s=1)
    ax4.scatter(counters, d3_set, s=1)
    fig.savefig(os.path.join(out_dir, "多个时间差热力图.jpg"))



def get_avg_time(summary_id: int = None):
    """
    Log min / max / average processing time of each stage, in seconds.

    d0: barcode sent -> scan finished (receive time)
    d1: scan finished -> validation finished (processing time)
    d3: barcode sent -> validation finished (main pipeline)
    d2: scan finished -> database write finished (pipeline incl. DB write)

    The "capture" timestamp means: in the test environment, when the colour
    block signal starts sending; in production, when it is received.

    Reference run (10000 samples / 20000 records):
    * scan -> validate | max 0.110043, min 0.0, avg 0.0026928
    * scan -> save     | max 4.364666, min 0.0, avg 1.9707844

    :param summary_id: restrict to one task; falsy means all records
    """
    if summary_id:
        query = db_session.query(ScanRecordOrm).filter(ScanRecordOrm.summary_id == summary_id).all()
    else:
        query = db_session.query(ScanRecordOrm).all()
    counter0, counter1, counter2, counter3 = [], [], [], []
    slowest_rows = []  # rows whose main pipeline took over 1 second
    for x in query:
        d0 = (x.scan_time - x.capture_time).total_seconds()
        d1 = (x.validate_time - x.scan_time).total_seconds()
        d3 = (x.validate_time - x.capture_time).total_seconds()
        if d3 > 1.:
            slowest_rows.append({"row_count": x.row_count, 'd3': d3, "d0": d0, "d1": d1})
        d2 = (x.save_time - x.scan_time).total_seconds()
        counter0.append(d0)
        counter1.append(d1)
        counter2.append(d2)
        counter3.append(d3)
    length = len(counter0)
    if length == 0:
        # BUGFIX: the original crashed (IndexError / ZeroDivisionError) when
        # the query returned no rows.
        logger.debug(f"summary_id={summary_id}: no records to summarise")
        return
    # min()/max() replace the previous full sorts — O(n) instead of O(n log n).
    max0, min0 = max(counter0), min(counter0)
    max1, min1 = max(counter1), min(counter1)
    max2, min2 = max(counter2), min(counter2)
    max3, min3 = max(counter3), min(counter3)
    avg0 = sum(counter0) / length
    avg1 = sum(counter1) / length
    avg2 = sum(counter2) / length
    avg3 = sum(counter3) / length

    logger.debug(f"=========================任务id: {summary_id}, 取样数量: {length}=======================================")
    logger.debug(f"从发送条码到读码完成 | 最大延时:{max0}, 最小延时：{min0}, 平均延时： {avg0} ")  # receive time
    logger.debug(f"从读码完成到验证完成 | 最大延时:{max1}, 最小延时：{min1}, 平均延时： {avg1} ")  # processing time
    logger.debug(f"从发送条码到验证完成 | 最大延时:{max3}, 最小延时：{min3}, 平均延时： {avg3} ")  # main pipeline
    logger.debug(f"从读码完成到写入完成 | 最大延时:{max2}, 最小延时：{min2}, 平均延时： {avg2} ")  # pipeline incl. DB write
    # Report the slowest rows (previously collected but never used).
    if slowest_rows:
        slowest_rows.sort(key=lambda r: (r['d0'], r['d1']))
        logger.debug(f"slowest {len(slowest_rows)} rows (d3 > 1s)")
        for row in slowest_rows:
            logger.debug(row)
    


# Analysis window start for the download charts. Later assignments override
# earlier ones — only the last uncommented BEGIN takes effect.
BEGIN = datetime(2023, 12, 13, 0, 0, 0)
BEGIN = datetime(2023, 12, 19, 16, 0, 0)  # raised to 7 download threads
# BEGIN = datetime(2023, 12, 20, 11, 0, 0)  # raised to 14 download threads


def draw_download_h1():
    """
    Plot a bar chart of downloaded-video counts per hour since ``BEGIN``.

    Reads finished MediaFileOrm downloads (begin/end set, file saved), buckets
    them by whole hours elapsed since ``BEGIN``, and saves the chart under
    ``<R_P_PATH>/test``.
    """
    begin = BEGIN
    db_session = new_db_session()
    cond = and_(MediaFileOrm.download_begin != None,
                MediaFileOrm.download_end != None,
                MediaFileOrm.download_begin >= begin,
                MediaFileOrm.save_path != '')
    query = db_session.query(MediaFileOrm).where(cond)
    counts = Counter()  # hour offset since `begin` -> number of files downloaded
    for x in query:
        # NOTE: a dead `strftime` key assignment that was immediately
        # overwritten has been removed; the bucket key is the hour offset.
        key = int((x.download_begin - begin).total_seconds() / 3600)
        counts[key] += 1
    db_session.close()
    x1, y1 = [], []
    for key in sorted(counts):  # chronological order
        x1.append(key)
        y1.append(counts[key])
    fig = plt.figure(figsize=(16, 8))  # figure size
    # NOTE(review): rotation=True is read as a 1-degree angle; probably meant
    # something like rotation=45 — confirm intent before changing.
    plt.xticks(rotation=True)
    plt.title("每小时下载视频个数(个/小时)", fontdict={"family": "SimSun"})
    plt.xlabel("时间", fontdict={"family": "SimSun"})
    plt.ylabel("下载个数", fontdict={"family": "SimSun"})
    plt.bar(x1, y1)
    fig.savefig(os.path.join(R_P_PATH, "test", "下载速度（个数）直方图.png"))


def draw_download_h2():
    """
    Plot a bar chart of downloaded traffic per hour (GB/hour) since ``BEGIN``.

    Reads finished MediaFileOrm downloads, sums their sizes per whole hour
    elapsed since ``BEGIN``, and saves the chart under ``<R_P_PATH>/test``.
    """
    begin = BEGIN
    session = new_db_session()
    cond = and_(MediaFileOrm.download_begin != None,
                MediaFileOrm.download_end != None,
                MediaFileOrm.download_begin >= begin,
                MediaFileOrm.save_path != '')
    rows = session.query(MediaFileOrm).where(cond)
    sizes = {}  # hour offset since `begin` -> downloaded megabytes in that hour
    for row in rows:
        hour = int((row.download_begin - begin).total_seconds() / 3600)
        sizes[hour] = sizes.get(hour, 0) + row.get_speed_obj().size  # unit: MB
    session.close()
    hours = sorted(sizes)  # chronological order
    x2 = list(hours)
    y2 = [sizes[h] / 1024 for h in hours]  # MB -> GB
    fig = plt.figure(figsize=(16, 8))  # figure size
    plt.xticks(rotation=True)
    plt.title("每小时下载视频流量(G/小时)", fontdict={"family": "SimSun"})
    plt.xlabel("时间", fontdict={"family": "SimSun"})
    plt.ylabel("下载流量", fontdict={"family": "SimSun"})
    plt.bar(x2, y2)
    fig.savefig(os.path.join(R_P_PATH, "test", "下载速度（尺寸）直方图.png"))




if __name__ == '__main__':
    draw_download_h1()
    draw_download_h2()
    # read_file()
    # parse_file1()
    # draw_plot()
    # The main three: two per-stage latency charts and one per-stage average.
    # draw_delta()  # per-stage latency bar chart - not very intuitive
    # draw_delta_hist(6)  # per-stage latency scatter plot - intuitive
    # get_avg_time(51)  # per-stage average latencies - intuitive
    pass