import os
import time
import pandas as pd
import csv
import datetime
import re
import chardet
import warnings
import count
warnings.filterwarnings('ignore')
currentpath = os.path.dirname(os.path.abspath(__file__))
from collections import defaultdict
def read_file(filepath):
    """Detect and return the text encoding of *filepath* via chardet.

    Only a bounded prefix of the file is sampled, so large log files are
    not loaded fully into memory just to sniff their encoding.  Returns
    whatever chardet reports — possibly ``None`` for an empty or
    undetectable file, which ``open(..., encoding=None)`` treats as the
    locale default.
    """
    with open(filepath, 'rb') as f:
        # 256 KiB is ample for a confident chardet guess and keeps memory flat
        # (the original read the whole file).
        sample = f.read(256 * 1024)
    return chardet.detect(sample)['encoding']


def write_csvlog(datadict: dict, logclass: str):
    """Dump *datadict* to a timestamped CSV next to this script.

    A list value becomes one row ``key, item1, item2, ...``; any other
    value becomes a two-column ``key, value`` row.

    :param datadict: data to persist, one row per key
    :param logclass: filename prefix, e.g. ``errcode`` -> ``errcode_<ts>.csv``
    :return: absolute path of the CSV file written
    """
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    script_dir = os.path.dirname(os.path.abspath(__file__))
    csv_filepath = os.path.join(script_dir, f"{logclass}_{timestamp}.csv")
    print('日志生成在如下路径: ', csv_filepath)
    with open(csv_filepath, mode='w', newline='', encoding='utf-8') as handle:
        writer = csv.writer(handle)
        for key, value in datadict.items():
            row = [key] + value if isinstance(value, list) else [key, value]
            writer.writerow(row)
    return csv_filepath


# 列表类数据写入txt文件
# def write_txtlog(datalist: list, logclass: str):
#     now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
#     currentpath = os.path.dirname(os.path.abspath(__file__))
#     txt_file = f"{logclass}_{now}.txt"
#     txt_filepath = os.path.join(currentpath,'Logs', txt_file)
#     # print('日志生成在如下路径: ', txt_filepath)
#     data_str = '\n'.join([ str(item) for item in datalist ])
#     with open(txt_filepath, mode='w', newline='', encoding='utf-8') as file:
#         file.write(data_str)
#     return txt_filepath
def write_txtlog(datalist: list, logclass: str):
    """Write each item of *datalist* (``str()``-converted) on its own line
    into a timestamped txt file next to this script.

    :param datalist: items to write, one per line
    :param logclass: filename prefix, e.g. ``misstask`` -> ``misstask_<ts>.txt``
    :return: absolute path of the txt file written
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    script_dir = os.path.dirname(os.path.abspath(__file__))
    txt_filepath = os.path.join(script_dir, f"{logclass}_{stamp}.txt")
    content = '\n'.join(str(entry) for entry in datalist)

    with open(txt_filepath, mode='w', newline='', encoding='utf-8') as handle:
        handle.write(content)

    return txt_filepath

# Requirement-sheet matching: report protocols (and optionally locust tasks)
# listed in the requirement Excel but missing from the parsed log.
def feature_protocols(procover_dict: dict):
    """Diff the requirement sheet's protocol list against the log's protocols.

    Expects ``procover_dict`` to carry:
      - 'yan_filepath' / 'yan_pro_sheet': requirement Excel path and sheet name
      - 'model_cl' / 'proto_cl': column names for feature module and protocol id
      - 'feature_mix': True -> check every protocol; False -> only rows whose
        module column equals 'feature_test'
      - 'pro_list': protocol ids actually seen in the log (compared as strings)
      - 'locustpro': True -> also diff sheet 'task覆盖' against 'result_task'
      - 'csv_write': True -> dump missing tasks via write_txtlog

    Returns a dict with formatted report text and any log-file paths.
    """
    message_dict = {
        'task_ouput': '',
        'output_last': '',
        'misstask_path': '',
        'misspro_path': ''
    }

    yan_pro_sheet = procover_dict [ 'yan_pro_sheet' ]
    yan_filepath = procover_dict [ 'yan_filepath' ]
    model_cl = procover_dict [ 'model_cl' ]
    proto_cl = procover_dict [ 'proto_cl' ]
    feature_mix = procover_dict [ 'feature_mix' ]
    feature_test = procover_dict [ 'feature_test' ]
    pro_list = procover_dict [ 'pro_list' ]
    csv_write = procover_dict [ 'csv_write' ]
    locustpro = procover_dict [ 'locustpro' ]
    df = pd.read_excel(yan_filepath, sheet_name=yan_pro_sheet)
    model_cls = list(df [ f'{model_cl}' ].ffill())  # merged-cell handling: forward-fill
    # proto_cls = list(df [ f'{proto_cl}' ])
    proto_cls = list(df[f'{proto_cl}'].dropna().astype(int))
    result = {}
    if feature_mix == True:
        new_proto_cls = proto_cls
    else:
        # NOTE(review): model_cls keeps the full (ffilled) sheet length while
        # proto_cls was dropna()'d above, so proto_cls[i] indexed by model_cls
        # positions can misalign or raise IndexError whenever the protocol
        # column has blank cells — verify against a real sheet.
        matched_proto_cls = [ proto_cls [ i ] for i in range(len(model_cls)) if model_cls [ i ] == f'{feature_test}' ]
        print(122, proto_cls)
        new_proto_cls = list(matched_proto_cls)
        print(188, new_proto_cls)
    for i in range(len(new_proto_cls)):
        miss_pro = new_proto_cls [ i ]
        if str(miss_pro) not in pro_list:
            # NOTE(review): .index() returns the first occurrence in the
            # dropna()'d list; using that position to index the full-length
            # model_cls assumes no blank protocol cells above this row and no
            # duplicate protocol ids — confirm both.
            index = proto_cls.index(miss_pro)
            result [ miss_pro ] = model_cls [ index ]
    # locust task coverage sheet: tasks listed in Excel but absent from the log
    if locustpro == True:
        result_task = procover_dict [ 'result_task' ]
        miss_task = [ ]
        df = pd.read_excel(yan_filepath, sheet_name='task覆盖')
        task_cls = list(df [ 'task任务列' ])
        for item in task_cls:
            if item not in result_task:
                miss_task.append(item)  # record missing locust tasks

        task_ouput = (f'----------缺失task任务如下----------\n'
                      + f'{miss_task}\n'
                      + f'共计{len(miss_task)}条任务')
        # print(task_ouput)
        message_dict [ 'task_ouput' ] = task_ouput
        if csv_write:
            misstask_path = write_txtlog(miss_task, 'misstask')
            message_dict [ 'misstask_path' ] = misstask_path
    pro_output = (f'----------缺失协议如下----------\n'
                  + f'{result}\n'
                  + f'共计{len(result)}条协议')
    # print(pro_output)
    message_dict [ 'output_last' ] = pro_output
    # if csv_write == True:
    #     misspro_path = write_csvlog(result, 'misspro')
    #     message_dict [ 'misspro_path' ] = misspro_path
    return message_dict


# Requirement-sheet matching: resolve error-code descriptions.
def feature_ercode(logercode_dict: dict):
    """Map each logged error code to its description from the requirement Excel.

    Reads columns ``errcode_cl`` (code) and ``detail_cl`` (description) from
    sheet ``yan_err_sheet`` of ``yan_filepath`` and resolves every code in
    ``log_err_list``; codes not found in the sheet map to '无描述信息'.
    When ``csv_write`` is truthy the mapping is also dumped via write_csvlog.

    :return: ``(output_result, csv_filepath)`` — the formatted text and the
        CSV path ('' when no CSV was written).
    """
    yan_err_sheet = logercode_dict [ 'yan_err_sheet' ]
    yan_filepath = logercode_dict [ 'yan_filepath' ]
    errcode_cl = logercode_dict [ 'errcode_cl' ]
    detail_cl = logercode_dict [ 'detail_cl' ]
    err_list = logercode_dict [ 'log_err_list' ]
    csv_write = logercode_dict [ 'csv_write' ]
    df = pd.read_excel(yan_filepath, sheet_name=yan_err_sheet)
    # Build a code -> description map once (setdefault keeps the FIRST
    # occurrence, matching the old list.index() behaviour) instead of an
    # O(n) list scan per code.
    detail_by_code = {}
    for code, detail in zip(df [ f'{errcode_cl}' ], df [ f'{detail_cl}' ]):
        detail_by_code.setdefault(code, detail)
    result = {item: detail_by_code.get(item, '无描述信息') for item in err_list}
    csv_filepath = ''
    if not result:
        output_result = '无匹配描述信息'
    else:
        output_result = f'错误码描述信息如下:\n {result}'
        print("output_result: ", output_result)
        if csv_write:
            csv_filepath = write_csvlog(result, 'errcode')
    return output_result, csv_filepath


# Protocol-log statistics: collect the protocol (and locust task) columns,
# then delegate the coverage diff to feature_protocols.
def log_protocols(procover_dict: dict):
    """Parse the protocol log, print per-file metrics, and run the coverage diff.

    Delegates the actual log parsing to ``count.parse_log_file`` (project
    module not visible here — presumably it fills ``service_data`` with
    per-file metric dicts and returns the protocol list; TODO confirm).
    Everything except the summary row ``log_total`` is kept as the observed
    protocol list, stored back into ``procover_dict['pro_list']``, and
    ``feature_protocols`` is invoked on the enriched dict.

    :return: ``(message_dict, service_data)`` — the report from
        feature_protocols plus the collected per-file metrics.
    """
    log_filepath = procover_dict [ 'log_filepath' ]
    log_pro_cl = procover_dict [ 'log_pro_cl' ]
    log_total = procover_dict [ 'log_total' ]
    locustpro = procover_dict [ 'locustpro' ]
    result = [ ]
    result_task = [ ]

    service_data = defaultdict(dict)
    proto_list = count.parse_log_file(log_filepath, service_data)

    # Console dump of the latest metrics per parsed file.
    for filename, data in service_data.items():
        print(f"\n▶️ 文件 [(unknown)] 最新数据")
        print("─" * 85)
        print("{:<8} | {:<6} | {:<8} | {:<8} | {:<8} | {:<8}".format(
            "服务", "TPM", "成功率", "最大响应", "平均响应", "90%响应"))
        for name, metrics in data.items():
            print(f"{name.ljust(8)} | {metrics['tpm']:6} | {metrics['success']:7.2f}% | "
                  f"{metrics['max_time']:8}ms | {metrics['avg_time']:8}ms | {metrics['avg90_time']:8}ms")

    # NOTE(review): file_encoding is currently unused — every consumer of it
    # below is commented out.
    file_encoding = read_file(log_filepath)
    if locustpro == True:  # locust-specific CSV handling, currently disabled
        pass
        # with open(log_filepath, encoding=file_encoding) as file:
        #     reader = csv.DictReader(file)
        #
        #     for row in reader:
        #         item = row [ log_pro_cl ]
        #         if row [ 'Type' ] == 'Action':
        #             result.append(item [ 1: ])
        #         elif row [ 'Type' ] == 'Task':
        #             result_task.append(item)
    else:
        for item in proto_list:
            if item != log_total:
                result.append(item)
        # with open(log_filepath, encoding=file_encoding) as file:
        #     reader = csv.DictReader(file)
        #     id_column = [ row [ f'{log_pro_cl}' ] for row in reader ]
        # keep only protocol rows, dropping the summary row
        # for item in id_column:
        #     if item != log_total:
        #         result.append(item)
    procover_dict.update({'service_data': service_data})
    procover_dict [ 'pro_list' ] = result
    print("actual_log: ", procover_dict [ 'pro_list' ])

    # result_task is only populated by the disabled locust branch above, so
    # this is currently dead — kept for when that branch is re-enabled.
    if result_task:
        procover_dict [ 'result_task' ] = result_task
    message_dict = feature_protocols(procover_dict)

    return message_dict,service_data


# Log statistics: extract error codes from log files and resolve them.
def log_errcodes(logercode_dict: dict):
    """Scan a log file (or every file under a directory) for error codes.

    ``logercode_dict['regex']`` must capture the code:
      - directory scan: anchored ``match`` per line, group(1) = matched text,
        group(2) = code;
      - single file: ``search`` per line, group(0) = matched text,
        group(1) = code.
      NOTE(review): the two branches genuinely differ today (anchor and group
      numbering) — confirm whether that asymmetry is intended.

    Collected codes are resolved via ``feature_ercode``.  Returns a dict with
    'errmsg', 'output_last' and 'errcode_filepath'.
    """
    message_dict = {
        'errmsg': '',
        'output_last': '',
        'errcode_filepath': '',
    }
    log_file_path = logercode_dict [ 'log_file_path' ]
    regex = logercode_dict [ 'regex' ]
    # Compile once up front; the original recompiled inside the os.walk loop.
    pattern = re.compile(regex)
    codes_set = set()
    error_str = ''
    try:
        if os.path.isdir(log_file_path):
            # Directory: walk every file underneath and scan line by line.
            for dirpath, dirnames, filenames in os.walk(log_file_path):
                for filename in filenames:
                    file_path = os.path.join(dirpath, filename)
                    file_encoding = read_file(file_path)
                    with open(file_path, 'r', encoding=file_encoding) as f:
                        lines = f.readlines()
                    for line in lines:
                        match = pattern.match(line)
                        if match:
                            error_str = match.group(1)
                            code = match.group(2)
                            codes_set.add(int(code))
        else:
            file_encoding = read_file(log_file_path)
            with open(log_file_path, 'r', encoding=file_encoding) as f:
                lines = f.readlines()
            for line in lines:
                match = pattern.search(line)
                if match:
                    error_str = match.group(0)
                    print("error_str: ", error_str)
                    code = match.group(1)
                    print("code: ", code)
                    codes_set.add(int(code))
    except UnicodeDecodeError:  # was `as Exception`, shadowing the builtin
        errmsg = '编码格式错误'
        message_dict [ 'errmsg' ] = errmsg
    if len(codes_set) > 0:
        logercode_dict [ 'log_err_list' ] = codes_set
        print("logercode_dict: ", logercode_dict)
        output_result, ercode_filepath = feature_ercode(logercode_dict)
        message_dict [ 'errcode_filepath' ] = ercode_filepath
        output_last = '-------------压测错误码如下------\n' + f'匹配查找：{error_str}\n' + output_result
    else:
        output_last = '没有查找到匹配信息.'
    message_dict [ 'output_last' ] = output_last
    return message_dict


# Per-feature aggregation: sum the request counts of every protocol under each
# feature module, then express each sum as a weight relative to a baseline.
def feature_weight(weight_dict: dict):
    """Aggregate protocol request counts per feature and derive weights.

    Steps:
      1. weight sheet -> ordered distinct feature names and the feature with
         the smallest weight value (the baseline for model_num);
      2. protocol-coverage sheet -> protocols belonging to each feature;
      3. log CSV -> per-feature sum of ``pro_sum`` over its protocols;
      4. scale the sums via ``model_num`` and optionally dump CSV logs.

    Returns a dict with 'errmsg', 'output_last' and any CSV log paths.
    """
    message_dict = {
        'errmsg': '',
        'output_last': '',
        'featureweight_filepath': '',
        'featurenum_filepath': '',
    }
    yan_filepath = weight_dict [ 'yan_filepath' ]
    weight_sheet = weight_dict [ 'weight_sheet' ]
    weight_model = weight_dict [ 'weight_model' ]
    weight_num = weight_dict [ 'weight_num' ]
    yan_pro_sheet = weight_dict [ 'yan_pro_sheet' ]
    model_cl = weight_dict [ 'model_cl' ]
    proto_cl = weight_dict [ 'proto_cl' ]
    log_filepath = weight_dict [ 'log_filepath' ]
    log_pro_cl = weight_dict [ 'log_pro_cl' ]
    csv_write = weight_dict [ 'csv_write' ]
    pro_sum = weight_dict [ 'pro_sum' ]

    # Weight sheet: distinct feature names, preserved in original row order.
    workbook = pd.read_excel(yan_filepath, sheet_name=weight_sheet)
    target_column_index = workbook.columns.get_loc(weight_model)
    target_column_values = workbook.iloc [ :, target_column_index ].dropna().unique()
    value_and_index = [ (value, index) for index, value in enumerate(workbook.iloc [ :, target_column_index ]) if
                        pd.notnull(value) ]
    value_and_index = sorted(value_and_index, key=lambda x: x [ 1 ])
    target_column_values = [ x [ 0 ] for x in value_and_index if x [ 0 ] in target_column_values ]

    # Baseline: the feature carrying the smallest weight value.
    df = workbook.set_index(weight_num)
    min_weight = min(df.index)  # replaces the manual first-element scan loop
    model_wnum = {}
    # NOTE(review): min_row[weight_model].iloc[0] assumes df.loc returns a
    # DataFrame (duplicate weight values); a unique minimum would yield a
    # Series and break the .iloc chain — confirm the sheet always has ties.
    min_row = df.loc [ min_weight ]
    function_value = min_row [ weight_model ].iloc [ 0 ]
    model_wnum [ function_value ] = int(min_weight)
    print("model_wnum：", model_wnum)

    # Protocol-coverage sheet: protocols grouped per feature.
    a_sheet = pd.read_excel(yan_filepath, sheet_name=yan_pro_sheet)
    function_column = a_sheet.loc [ :, model_cl ]
    # Forward-fill merged feature cells.
    # NOTE(review): this fills a Series taken from a_sheet, but the filter
    # below reads a_sheet[model_cl] directly — whether the fill is visible
    # there depends on pandas copy semantics; verify merged-cell rows are
    # actually grouped as intended.
    merged_cells = function_column.isna()
    for i in range(len(merged_cells)):
        if merged_cells.iloc [ i ]:
            function_column.iloc [ i ] = function_column.iloc [ i - 1 ]
    result = {}
    for b_value in target_column_values:
        rows = a_sheet [ a_sheet [ model_cl ] == b_value ]
        values = rows [ proto_cl ].tolist()
        result [ b_value ] = values
    new_result = {}
    try:
        # Log CSV: sum the request counts of each feature's protocols.
        file_encoding = read_file(log_filepath)
        df = pd.read_csv(log_filepath, encoding=file_encoding)
        df = df.set_index(log_pro_cl)

        for key, values in result.items():
            total = 0
            for value in values:
                if value in df.index:
                    total += df.loc [ value, pro_sum ]
            new_result [ key ] = int(total)
        print("new_result：", new_result)

    except UnicodeDecodeError:  # was `as Exception`, shadowing the builtin
        errmsg = '编码格式错误'
        message_dict [ 'errmsg' ] = errmsg

    if csv_write:
        featurenum_filepath = write_csvlog(new_result, 'featurenum')
        message_dict [ 'featurenum_filepath' ] = featurenum_filepath
    # Scale per-feature totals by the baseline feature's weight.
    result_weight = model_num(model_wnum, new_result)
    print("result_weight:", result_weight)
    output_last = '----------功能模块：协议请求总数如下：------\n' + f'{new_result}\n' + '----------功能模块：协议总数权重占比如下：------\n' + f'基准值: {model_wnum}\n' + f'{result_weight}\n'
    message_dict [ 'output_last' ] = output_last
    if csv_write:
        featureweight_filepath = write_csvlog(result_weight, 'featureweight')
        message_dict [ 'featureweight_filepath' ] = featureweight_filepath
    return message_dict


# Scale per-feature request totals into weights relative to a baseline.
def model_num(model_wnum, new_result):
    """Scale *new_result* totals by a factor derived from the baseline.

    The factor is ``int(total / weight)`` for the first key of *model_wnum*
    that appears in *new_result* with a non-zero total (default factor 1).
    A factor of 0 yields an empty dict.

    :param model_wnum: baseline mapping, feature -> weight value
    :param new_result: feature -> summed request count
    :return: feature -> int(count / factor), or {} when the factor is 0
    """
    scale = 1
    for base_feature, base_weight in model_wnum.items():
        total = new_result.get(base_feature)
        if total:  # present and non-zero, same as `in` + `!= 0`
            scale = int(total / base_weight)
            break
    if scale == 0:
        return {}
    return {feature: int(total / scale) for feature, total in new_result.items()}


def stats_problem(problem_dict):
    """Summarise a pressure-test result CSV and collect problem protocols.

    Branches:
      - ``locustpro`` truthy: locust CSV layout ('Type' column, prefixed
        Action rows); a row is a problem when it has failures or its 90th
        percentile exceeds 1000 ms.
      - ``failnum`` truthy: plain CSV, same failure-rate based flagging.
      - otherwise: a row is a problem when success rate < 99.9% or 90th
        percentile > 1000 ms; additionally every ``elapse*`` file in the
        log's directory is aggregated through the ``count`` helper module.

    Returns a dict with 'errmsg', 'output_last' and 'problem_pro_filepath'.
    """
    message_dict = {
        'errmsg': '',
        'output_last': '',
        'problem_pro_filepath': '',
    }

    log_file_path = problem_dict['log_file_path']
    log_pro_cl = problem_dict['log_pro_cl']
    log_total = problem_dict['log_total']
    pro_success = problem_dict['pro_success']
    pro_tps = problem_dict['pro_tps']
    pro_max = problem_dict['pro_max']
    pro_avg = problem_dict['pro_avg']
    pro_90th = problem_dict['pro_90th']
    csv_write = problem_dict['csv_write']
    locustpro = problem_dict['locustpro']
    failnum = problem_dict.get('failnum', 0)  # optional, defaults to 0

    file_encoding = read_file(log_file_path)
    df = pd.read_csv(log_file_path, encoding=file_encoding)
    df = df.set_index(log_pro_cl)

    bot_nine = 0
    bot_tps = 0
    bot_max = 0
    bot_avg = 0
    dic_problem = dict()
    # BUGFIX: output_last used to stay unbound on the locustpro/failnum
    # branches (only output_total was built) and on an empty service_data,
    # raising NameError at the final assignment.
    output_last = ''

    if locustpro == True:
        fail_num = 0
        dic_problem['协议'] = ['失败数目', '90%响应时间']
        for pro in df.index:
            success_rate = float(df.loc[pro, pro_success])  # [row, column]
            ms_90th = float(df.loc[pro, pro_90th])
            if (100 - success_rate) > 0 or ms_90th > 1000:  # failure *rate* here
                if 'Type' in df.columns and df.loc[pro, 'Type'] == 'Action':
                    pro = pro[1:]  # strip the locust Action prefix character
                    dic_problem[pro] = [100 - success_rate, ms_90th]
            if str(pro) == log_total:
                fail_num = 100 - success_rate
                bot_nine = ms_90th
                bot_tps = "%.1f" % float(df.loc[pro, pro_tps])
                bot_max = "%.1f" % float(df.loc[pro, pro_max])
                bot_avg = "%.1f" % float(df.loc[pro, pro_avg])
        output_total = '------------统计结果如下1-------------\n' + \
                       f'tps\t失败数目\t90%响应时间\t最大响应时间\t平均响应时间\n' + \
                       f'{bot_tps}\t{fail_num}\t{bot_nine}\t{bot_max}\t{bot_avg}'
        # BUGFIX: surface the summary plus the problem list (was never
        # assigned, so this branch crashed at the end of the function).
        output_last = output_total + f'\n------------问题协议如下-------------\n{dic_problem}'
    elif failnum:
        dic_problem['协议'] = ['失败数目', '90%响应时间']
        for pro in df.index:
            success_rate = float(df.loc[pro, pro_success])
            ms_90th = float(df.loc[pro, pro_90th])
            if (100 - success_rate) > 0 or ms_90th > 1000:
                dic_problem[pro] = [100 - success_rate, ms_90th]
            if str(pro) == log_total:
                success_rate = float(df.loc[pro, pro_success])
                bot_nine = ms_90th
                bot_tps = "%.1f" % float(df.loc[pro, pro_tps])
                bot_max = "%.1f" % float(df.loc[pro, pro_max])
                bot_avg = "%.1f" % float(df.loc[pro, pro_avg])
        # NOTE(review): success_rate here is whatever the LAST row left behind,
        # not necessarily the total row — confirm intent.
        output_total = '------------统计结果如下2-------------\n' + \
                       f'{pro_tps}\t失败数目\t90%响应时间\t最大响应时间\t平均响应时间\n' + \
                       f'{bot_tps}\t{100 - success_rate}\t{bot_nine}\t{bot_max}\t{bot_avg}'
        # BUGFIX: same unbound-output_last crash as the locustpro branch.
        output_last = output_total + f'\n------------问题协议如下-------------\n{dic_problem}'
    else:
        success_rate = 0
        dic_problem['协议'] = ['成功率', '90%响应时间']
        for pro in df.index:
            success_rate = float(df.loc[pro, pro_success])
            ms_90th = float(df.loc[pro, pro_90th])
            if success_rate < 99.9 or ms_90th > 1000:
                dic_problem[pro] = [success_rate, ms_90th]
                # NOTE(review): these summary figures only update for
                # *problem* rows (leftover from commented-out total-row
                # logic) — confirm this is intended.
                success_rate = float(df.loc[pro, pro_success])
                bot_nine = ms_90th
                bot_tps = "%.1f" % float(df.loc[pro, pro_tps])
                bot_max = "%.1f" % float(df.loc[pro, pro_max])
                bot_avg = "%.1f" % float(df.loc[pro, pro_avg])

        service_data = defaultdict(dict)

        # Aggregate every elapse* file in the log's directory via count.
        workPath = os.path.dirname(log_file_path)
        log_files = [f for f in os.listdir(workPath) if f.startswith('elapse')]
        print(log_files)
        for filename in log_files:
            print("处理文件：", filename)
            try:
                proto_list = count.parse_log_file(os.path.join(workPath, filename), service_data)
                print(f"协议列表: {proto_list}")
            except Exception as e:
                print(f"处理错误 (unknown): {str(e)}")
        print(199, service_data)
        # Per-file console dump of the latest metrics.
        for filename, data in service_data.items():
            print(f"\n▶️ 文件 [(unknown)] 最新数据")
            print("─" * 85)
            print("{:<8} | {:<6} | {:<8} | {:<8} | {:<8} | {:<8}".format(
                "服务", "TPM", "成功率", "最大响应", "平均响应", "90%响应"))
            for name, metrics in data.items():
                print(f"{name.ljust(8)} | {metrics['tpm']:6} | {metrics['success']:7.2f}% | "
                      f"{metrics['max_time']:8}ms | {metrics['avg_time']:8}ms | {metrics['avg90_time']:8}ms")

            stats = count.calculate_stats(data)
            stats_str = "\n".join([f"{k}: {v}" for k, v in stats.items()])
            # Last processed file wins; output_last stays '' when no elapse*
            # file produced data (previously a NameError).
            output_last = stats_str + f'\n------------问题协议如下-------------\n{dic_problem}'

    # Disabled feature kept for reference:
    # if csv_write:
    #     problem_pro_filepath = write_csvlog(dic_problem, 'problem_pro')
    #     message_dict['problem_pro_filepath'] = problem_pro_filepath
    message_dict['output_last'] = output_last
    return message_dict

