import os
import re
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import openpyxl
import pandas as pd
import seaborn as sns
from openpyxl.drawing.image import Image


from collections import OrderedDict
import pytz
import matplotlib.ticker as ticker
import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import LineChart, Reference
from openpyxl.utils.dataframe import dataframe_to_rows
import pandas as pd
from openpyxl import load_workbook
from openpyxl.chart import LineChart, Reference,BarChart
import xlsxwriter
from openpyxl.chart.label import DataLabel, DataLabelList
from openpyxl.drawing.spreadsheet_drawing import SpreadsheetDrawing
from openpyxl.styles import PatternFill
from copy import deepcopy
import xlwings as xw
import sys

import math


# Resolve the project root: when running as a frozen (PyInstaller-style)
# executable, paths are anchored at the binary; otherwise one level above
# this source file.
if getattr(sys, 'frozen', False):
    BASE_PATH_ROOT = os.path.dirname(sys.executable)
else:
    BASE_PATH_ROOT = os.path.dirname(os.path.dirname(__file__))

# BASE_PATH_ROOT = os.path.dirname(os.path.dirname(__file__))  # debug path
# BASE_PATH_ROOT = os.path.dirname(sys.executable)  # debug path
print("程序运行中，请稍后")
# BASE_PATH_ROOT = os.path.dirname(os.path.realpath(sys.executable))  # packaged path
BASE_PATH_CONFIG = os.path.join(BASE_PATH_ROOT, "config")      # configuration files
BASE_PATH_SOURCE = os.path.join(BASE_PATH_ROOT, "source")      # raw input logs
BASE_PATH_RESULT = os.path.join(BASE_PATH_ROOT, "result")      # generated reports/images
BASE_PATH_DATABASE = os.path.join(BASE_PATH_ROOT, "database")  # persisted data


def get_time(log_entry):
    """Extract a dump timestamp such as 'Sat Jan 1 00:04:25 CST 2000'.

    Matches lines like:
        ####Start dump Sat Jan 1 00:04:25 CST 2000####
        ####End dump Fri Dec 31 05:56:39 CST 1999####

    Args:
        log_entry: one raw log line.

    Returns:
        The matched time string, or "" when no timestamp is present.
    """
    # Weekday abbrev, month abbrev, 1-2 digit day, HH:MM:SS,
    # 3-letter timezone, 4-digit year.
    pattern = (
        r'\b(?:Wed|Thu|Fri|Sat|Sun|Mon|Tue) '
        r'(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) '
        r'\d{1,2} \d{2}:\d{2}:\d{2} [A-Z]{3} \d{4}\b'
    )
    found = re.search(pattern, log_entry)
    return found.group(0) if found else ""


def merge_arrays(arr1, arr2):
    """Return the union of two sequences as a deduplicated list (order unspecified)."""
    combined = set(arr1)
    combined.update(arr2)
    return list(combined)


def _merge_top_df_with_cpu(data_frames):
    # 确定所有可能的列名
    all_columns = set()
    for df in data_frames:
        all_columns.update(df['ARGS'].astype(str))
    # all_columns = sorted(all_columns)
    # 将all_columns转换为列表
    all_columns = list(all_columns)

    # 创建一个空的 DataFrame，列名为所有可能的 ARGS 值，并指定默认数据类型为浮点数
    result_df = pd.DataFrame(0.0, index=range(len(data_frames)), columns=all_columns, dtype=float)

    # 填充 DataFrame
    for i, df in enumerate(data_frames):
        for arg, cpu in zip(df['ARGS'].astype(str), df['%CPU']):
            # 确保 cpu 是浮点数类型
            result_df.at[i, arg] = float(cpu)

    return result_df


def merge_top_df_with_cpu(data_frames, type, selected_args=None):
    """Pivot per-sample `top` DataFrames into one wide time-series frame.

    Args:
        data_frames: list of DataFrames (one per sample) containing 'Time',
            'ARGS' and the metric column named by ``type``.
        type: name of the metric column to aggregate (e.g. '%CPU').
            NOTE: shadows the builtin ``type``; kept for caller compatibility.
        selected_args: optional collection of process names to keep; falsy
            values or the literal string "[]" keep every process.

    Returns:
        DataFrame with columns ['Time', 'total', <one per process>]: one row
        per sample, 'total' holding the per-sample metric sum.
    """
    # Normalize the "no filter" spellings to None.
    if not selected_args or selected_args == "[]":
        selected_args = None

        # Collect every ARGS value across all samples ('Time' is handled separately).
    all_columns = set()
    for df in data_frames:
        all_columns.update(df['ARGS'].astype(str))
    all_columns = list(all_columns)

    # Keep only the requested processes (bracket characters are ignored when matching).
    if selected_args:
        filtered_columns = [arg for arg in all_columns if arg.replace("[", "").replace("]", "") in selected_args]
    else:
        filtered_columns = all_columns

        # Empty frame: 'Time' + filtered ARGS columns + 'total'.
    result_df = pd.DataFrame(index=range(len(data_frames)), columns=["Time"] + filtered_columns + ["total"],
                             dtype=object)

    # Fill one row per sample.
    for i, df in enumerate(data_frames):
        # Each sample frame carries a single timestamp.
        result_df.at[i, 'Time'] = df['Time'].iloc[0]

        # Per-process accumulator for this sample.
        cpu_dict = {arg: 0.0 for arg in filtered_columns}
        cpu_sum = 0.0
        #
        # Sum the metric for every (ARGS, value) pair that survived filtering.
        for arg, cpu in zip(df['ARGS'].astype(str), df[f'{type}']):
            if arg in cpu_dict:
                if selected_args:
                    cpu_dict[arg] += float(cpu)
                    cpu_sum += float(cpu)
                else:
                    # NOTE(review): this branch assumes string values carrying
                    # K/M/G size suffixes (memory metrics) and normalizes
                    # everything to KB — confirm callers pass memory data
                    # whenever selected_args is unset.
                    if 'M' in cpu:
                        cpu_value = float(cpu.replace('M', '')) * 1024  # 1M = 1024K
                        unit = 'K'  # converted unit is K
                    elif 'G' in cpu:
                        cpu_value = float(cpu.replace('G', '')) * 1024 *1024 # 1G = 1024*1024K
                        unit = 'K'  # converted unit is K
                    elif 'K' in cpu:
                        cpu_value = float(cpu.replace('K', ''))
                        unit = 'K'  # already in K
                    else:
                        cpu_value = float(cpu)  # bare number: use as-is
                        unit = ''
                    cpu_dict[arg] += float(cpu_value)
                    cpu_sum += float(cpu_value)



                # Write the per-process values for this row.
        for arg in filtered_columns:
            result_df.at[i, arg] = cpu_dict[arg]

            # Write the per-sample total.
        result_df.at[i, 'total'] = cpu_sum
    # Reorder columns to ['Time', 'total', <processes>].
    cols = result_df.columns.tolist()
    cols = cols[:1] + ['total'] + cols[1:-1]
    result_df = result_df[cols]
    return result_df


def merge_hog_df_with_cpu(data_frames, columns_name, type, selected_args=None, is_total=False):
    """Pivot per-sample `hogs` DataFrames into one wide time-series frame.

    Args:
        data_frames: list of DataFrames, one per sample, each containing
            'Time', the key column ``columns_name`` and the metric column
            ``type`` (string values, possibly suffixed with '%').
        columns_name: column whose values become the output columns.
        type: metric column to read. NOTE: shadows the builtin ``type``;
            kept for caller compatibility.
        selected_args: optional collection of names to keep; falsy values
            or the literal string "[]" keep everything.
        is_total: when True, add a 'total' column (copied from each sample
            frame when that frame has one).

    Returns:
        DataFrame with one row per sample; metric values are strings with
        any trailing '%' stripped (so Excel charts read them as numbers).
    """
    # Normalize the "no filter" spellings to None.
    if not selected_args or selected_args == "[]":
        selected_args = None

        # Collect every key value across all samples ('Time' is handled separately).
    all_columns = set()
    for df in data_frames:
        all_columns.update(df[f'{columns_name}'].astype(str))
    all_columns = list(all_columns)

    # Keep only the requested names (bracket characters are ignored when matching).
    if selected_args:
        filtered_columns = [arg for arg in all_columns if arg.replace("[", "").replace("]", "") in selected_args]
    else:
        filtered_columns = all_columns

        # Empty frame: 'Time' (+ optional 'total') + filtered key columns.
    if is_total:
        result_df = pd.DataFrame(index=range(len(data_frames)), columns=["Time"] + ["total"] + filtered_columns,
                                 dtype=object)
    else:
        result_df = pd.DataFrame(index=range(len(data_frames)), columns=["Time"] + filtered_columns,
                                 dtype=object)

        # Fill one row per sample.
    for i, df in enumerate(data_frames):
        # Each sample frame carries a single timestamp.
        result_df.at[i, 'Time'] = df['Time'].iloc[0]
        # Copy the sample's 'total' value when the source frame provides one.
        if 'total' in df.columns:
            result_df.at[i, 'total'] = df[f'total'].iloc[0]

        # Default every key to 0.0 for this row.
        cpu_dict = {arg: 0.0 for arg in filtered_columns}

        # NOTE(review): despite the original "accumulate" intent, this is a
        # plain assignment — only the LAST value seen per key in a sample
        # is kept, not the sum.
        for arg, cpu in zip(df[f'{columns_name}'].astype(str), df[f'{type}']):
            if arg in cpu_dict:
                # Strip the trailing '%' so Excel line charts get numbers.
                cpu_dict[arg] = cpu.rstrip('%')

        # Write the per-key values for this row.
        for arg in filtered_columns:
            result_df.at[i, arg] = cpu_dict[arg]

    return result_df


def top_line_match(line):
    """Split one `top` output line around its trailing M:SS.ss time field.

    Everything up to (and including) the LAST time-looking token is
    whitespace-split, then the first token after the time is appended.

    Args:
        line: raw `top` process line.

    Returns:
        list[str]: the parsed fields, or twelve empty strings when the
        line contains no time field.
    """
    time_re = re.compile(r'(\d{1}:\d{2}\.\d{2})')

    # Keep only the last match on the line.
    last = None
    for candidate in time_re.finditer(line):
        last = candidate

    if last is None:
        return [""] * 12

    cut = last.end()
    # Fields up to and including the time token.
    fields = line[:cut].strip().split()
    # First token following the time (process name).
    trailing = line[cut:].rsplit()
    fields.append(trailing[0])
    return fields


def hogs_line_match(line):
    """Tokenize one `hogs` output line into at least 7 fields.

    Splits on whitespace and right-pads with empty strings so callers can
    always index fields 0-6 safely.

    Args:
        line: raw text line from the hogs log.

    Returns:
        list[str] of length >= 7.
    """
    # str.split() with no argument already discards empty fields, so the
    # previous "remove empty strings" pass was redundant.
    tokens = line.split()
    # Pad short lines so downstream column indexing never raises.
    if len(tokens) < 7:
        tokens.extend([""] * (7 - len(tokens)))
    return tokens


def showmem_line_match(line):
    """Tokenize one `showmem` output line into at least 4 fields.

    Splits on whitespace and right-pads with empty strings so callers can
    always index fields 0-3 safely.

    Args:
        line: raw text line from the showmem log.

    Returns:
        list[str] of length >= 4.
    """
    # str.split() with no argument already discards empty fields, so the
    # previous "remove empty strings" pass was redundant.
    tokens = line.split()
    # Pad short lines so downstream column indexing never raises.
    if len(tokens) < 4:
        tokens.extend([""] * (4 - len(tokens)))
    return tokens


def df_line_match(line):
    """Tokenize one `df` output line into at least 6 fields.

    Splits on whitespace and right-pads with empty strings so callers can
    always index fields 0-5 safely.

    Args:
        line: raw text line from the df log.

    Returns:
        list[str] of length >= 6.
    """
    # str.split() with no argument already discards empty fields, so the
    # previous "remove empty strings" pass was redundant.
    tokens = line.split()
    # Pad short lines so downstream column indexing never raises.
    if len(tokens) < 6:
        tokens.extend([""] * (6 - len(tokens)))
    return tokens


def processing_time(time_str):
    """Convert a log timestamp string to an integer Unix timestamp.

    Strings containing a CST or GMT zone token are parsed with a year;
    any other string uses the short '%b %d %H:%M:%S' form (strptime then
    defaults the year to 1900).  The conversion uses the local timezone,
    as the original implementation did.

    Args:
        time_str: e.g. 'Sat Jan 1 00:04:25 CST 2000'.

    Returns:
        int timestamp, or None when the string cannot be parsed.
    """
    # CST takes precedence over GMT, matching the original branch order.
    for zone in ("CST", "GMT"):
        if zone in time_str:
            cleaned = time_str.replace(zone, "")
            fmt = "%a %b %d %H:%M:%S %Y"
            break
    else:
        cleaned = time_str
        fmt = "%b %d %H:%M:%S"

    try:
        parsed = datetime.strptime(cleaned, fmt)
    except ValueError as exc:
        print(f"Error parsing time string: {exc}")
        return None

    return int(parsed.timestamp())




def draw_polyline(df, title, sheet_name,y_axis_rotation=False):
    """Render every non-Time column of ``df`` as a line-chart PNG.

    Args:
        df: DataFrame whose first column is 'Time'; the remaining columns
            are plotted as individual series (any '%' suffix is stripped).
        title: chart title.
        sheet_name: selects special-case axis handling and names the output
            file (<sheet_name>.png under BASE_PATH_RESULT).
        y_axis_rotation: when True, invert the Y axis (max at the bottom).

    Returns:
        str: path of the saved PNG file.
    """
    # Create the figure and axes (wider variant kept for reference).
    fig, ax = plt.subplots()
    # fig, ax = plt.subplots(figsize=(12, 8))  # 12in x 8in variant

    # Apply the Seaborn grid style.
    sns.set(style="whitegrid")

    # Use a font that can render Chinese text in titles/labels.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.yscale('linear')


    # Rotate X-axis labels 90 degrees and shrink their font.
    plt.xticks(rotation=90, fontsize=8)
    #
    # # Alternative: fix the x-axis tick step to 10
    # ax.xaxis.set_major_locator(ticker.MultipleLocator(10))
    #
    # # then re-apply the label rotation
    # plt.xticks(rotation=90, fontsize=8)

    # Title and axis labels.
    plt.title(title)
    plt.xlabel('Time')
    plt.ylabel('Value')

    # Special case: fixed Y ticks for the memory-total sheet.
    # NOTE(review): 272930368 looks like a fixed total-memory scale (units
    # unclear from here) — confirm against the data source.
    if sheet_name == "Meminfo-total":
        yticks = np.arange(0, 272930368, 27293036)
        ytick_labels = [str(tick) for tick in yticks]
        ax.set_yticks(yticks)
        ax.set_yticklabels(ytick_labels)

    columns = df.columns[1:]
    for column in columns:
        # sns.lineplot(x='Time', y=column, data=df, label=column)
        # Strip any '%' suffix so the values plot as numbers.
        ax.plot(df['Time'].apply(processing_time), df[column].astype("str").apply(lambda x: x.replace('%', '')),
                label=column)
        # ax.plot(df['Time'].apply(processing_time), df[column].apply(), label=column)
    if y_axis_rotation:
        # Invert the Y axis: maximum at the bottom, minimum at the top.
        ax.invert_yaxis()


    # plt.ylim(global_min, global_max )
    plt.autoscale(enable=True, axis='y')
    plt.autoscale(enable=True, axis='x')
    # Legend placed outside the plot area.
    legend = plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')

    # Save the chart as a PNG under the result directory.
    save_path = os.path.join(BASE_PATH_RESULT, f"{sheet_name}.png")
    plt.savefig(save_path, bbox_inches='tight')
    plt.close()  # release figure resources

    return save_path

# Stacked bar chart; `column` marks the extra leading column to drop
# (column=1 means columns 0 and 1 are excluded from the plot).
def draw_bar_chart(df,display_frame_time_list, title, sheet_name,column):
    """Render a stacked bar chart PNG with jank / big-jank annotations.

    Args:
        df: source DataFrame; column 0 and column index ``column`` are
            dropped before plotting.
        display_frame_time_list: per-frame display times; the 83330000 and
            125000000 thresholds suggest nanoseconds (83.33 ms / 125 ms) —
            TODO confirm units with the data source.
        title: chart title.
        sheet_name: output file name (<sheet_name>.png under
            BASE_PATH_RESULT).
        column: index of the extra column to exclude from the plot.

    Returns:
        str: path of the saved PNG file.
    """
    # Work on the provided DataFrame.
    profiledata_info_df = df

    # Drop the Time column (0) and the extra column.
    # axis=1 means columns.
    profiledata_info_df = profiledata_info_df.drop(profiledata_info_df.columns[[0, column]], axis=1)

    # Stacked bar plot; widen the canvas for large datasets (colors left default).
    ax = profiledata_info_df.plot(kind='bar', stacked=True, figsize=(150 if len(df) > 500 else 10, 6), width=0.3, edgecolor='none')


    # Per-row totals; the maximum drives the Y-axis range.
    max_value = profiledata_info_df.sum(axis=1).max()  # max of per-row sums
    count_list=profiledata_info_df.sum(axis=1).tolist()
    # Indices of frames exceeding each threshold.
    indices_83 = [i for i, value in enumerate(display_frame_time_list) if value > 83330000]
    indices_125 = [i for i, value in enumerate(display_frame_time_list) if value > 125000000]


    jank=[]
    big_jank=[]
    # A frame counts as (big-)jank when it exceeds the threshold AND its bar
    # total more than doubles the average of the preceding three bars.
    if len(count_list)==len(display_frame_time_list):
        if indices_83:
            for index in indices_83:
                if index >=3:
                    first_three_values = count_list[index-3:index]
                    average_of_first_three = sum(first_three_values) / len(first_three_values)
                    if count_list[index]>average_of_first_three*2:
                        jank.append(index)
                    pass
        if indices_125:
            for index in indices_125:
                if index >= 3:
                    first_three_values = count_list[index - 3:index]
                    average_of_first_three = sum(first_three_values) / len(first_three_values)

                    if count_list[index] > average_of_first_three*2:
                        big_jank.append(index)
    # Y axis: 10% headroom above the tallest stacked bar.
    ax.set_ylim(0, max_value *1.1)
    # Plain-integer formatting for the Y tick labels.
    ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, _: f'{int(x)}'))



    #
    # # Alternative: label every 5th frame with its time value
    # step = 5
    # ax.set_xticks(range(0, len(display_frame_time_list), step))
    # ax.set_xticklabels(display_frame_time_list[::step], rotation=45, ha='right')

    # step = 1  # step between ticks
    # x_ticks = range(len(display_frame_time_list))
    # ax.set_xticks(x_ticks)
    # ax.set_xticklabels(x_ticks, rotation=45, ha='right')

    # X axis: one tick every 5 frames, labeled with the frame index.
    step = 5
    x_ticks = range(0, len(display_frame_time_list), step)  # stepped tick positions
    ax.set_xticks(x_ticks)  # tick positions
    ax.set_xticklabels([str(i) for i in x_ticks], rotation=45, ha='right')  # integer labels


    # Title and axis labels.
    plt.title(title)
    plt.xlabel('Time')
    plt.ylabel('Values')


    # Big-jank frames take precedence over plain jank.
    jank = [x for x in jank if x not in big_jank]
    # Annotate each jank frame above its bar.
    if jank:
        for index in jank:
            severity_index=index
            if severity_index is not None and severity_index < len(profiledata_info_df):
                # Height of this stacked bar.
                severity_value = profiledata_info_df.iloc[severity_index].sum()
                ax.text(severity_index, severity_value, 'jank', ha='center', va='bottom', color='blue', fontsize=12)

    if big_jank:
        for index in big_jank:
            severity_index=index
            if severity_index is not None and severity_index < len(profiledata_info_df):
                # Height of this stacked bar.
                severity_value = profiledata_info_df.iloc[severity_index].sum()
                ax.text(severity_index, severity_value, 'bigjank', ha='center', va='bottom', color='red', fontsize=12)

    # # Show the chart interactively (development only).
    # plt.tight_layout()
    # plt.show()
    #
    plt.autoscale(enable=True, axis='y')
    plt.autoscale(enable=True, axis='x')
    # Legend placed outside the plot area.
    legend = plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')

    # Save the chart as a PNG under the result directory.
    save_path = os.path.join(BASE_PATH_RESULT, f"{sheet_name}.png")
    plt.savefig(save_path, bbox_inches='tight')
    plt.close()  # release figure resources
    return save_path


def draw_a(profiledata_info_df):
    """Write ``profiledata_info_df`` into a fresh workbook with a line chart.

    Creates '多列折线图.xlsx' in the current working directory holding the
    data plus a LineChart over columns B..H anchored at J5.

    Args:
        profiledata_info_df: DataFrame to export; its first column is used
            as the chart's X categories.
    """
    # # 1. Sample input kept for reference:
    # data = {
    #     '时间': ['1月', '2月', '3月', '4月'],
    #     '销售量': [100, 150, 120, 180]
    # }

    # Work on the provided DataFrame.
    df = profiledata_info_df

    # 2. Create the workbook and write the data.
    wb = Workbook()
    ws = wb.active
    ws.title = "数据"

    # Append the DataFrame rows (header included).
    for row in dataframe_to_rows(df, index=False, header=True):
        ws.append(row)

    # 3. Build a 2-D line chart.
    chart = LineChart()
    chart.title = "性能数据折线图"
    chart.style = 13

    # Axis titles.
    chart.x_axis.title = 'Time'
    chart.y_axis.title = 'Values'

    # # Optionally hide the gridlines:
    # chart.x_axis.majorGridlines = None
    # chart.y_axis.majorGridlines = None


    # 4. Data region: columns B..H, header row included.
    # NOTE(review): max_col is hard-coded to 8 — confirm the frame never has
    # more than 7 data columns.
    data = Reference(ws, min_col=2, min_row=1, max_col=8, max_row=len(df) + 1)

    # X-axis categories come from the 'Time' column (column A).
    categories = Reference(ws, min_col=1, min_row=2, max_row=len(df) + 1)


    # 5. Attach data and categories to the chart.
    chart.add_data(data, titles_from_data=True)
    chart.set_categories(categories)
    # Tick marks drawn outside the axes.
    chart.x_axis.majorTickMark = 'out'
    chart.y_axis.majorTickMark = 'out'

    # 6. Anchor the chart at J5.
    ws.add_chart(chart, "J5")

    # 7. Save the workbook.
    wb.save("多列折线图.xlsx")

def draw_b(profiledata_info_df,sheet_name,table_name):
    """Insert a line chart into a sheet of result/Android_status.xlsx.

    Args:
        profiledata_info_df: DataFrame whose row count determines the chart's
            data range (the sheet is assumed to already hold the data).
        sheet_name: worksheet to receive the chart (created if missing).
        table_name: chart title.
    """
    # Row count drives the chart's data range.
    df = profiledata_info_df

    # 1. Load the existing workbook from the result directory.
    report_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    wb = openpyxl.load_workbook(report_path)

    # 2. Select (or create) the target worksheet.
    if sheet_name not in wb.sheetnames:
        ws = wb.create_sheet(sheet_name)
    else:
        ws = wb[sheet_name]

    # # 3. (Data is expected to be present already; write-back kept for reference.)
    # for row in dataframe_to_rows(df, index=False, header=True):
    #     ws.append(row)

    # 4. Build a 2-D line chart.
    chart = LineChart()
    chart.title = table_name
    # chart.style = 13

    # Axis titles.
    chart.x_axis.title = 'Time'
    chart.y_axis.title = 'Values'

    # 5. Data region: columns C..IT (3..254), header row included.
    data = Reference(ws, min_col=3, min_row=1, max_col=254, max_row=len(df) + 1)

    # X-axis categories come from the 'Time' column (column A).
    categories = Reference(ws, min_col=1, min_row=2, max_row=len(df) + 1)

    # 6. Attach data and categories to the chart.
    chart.add_data(data, titles_from_data=True)
    chart.set_categories(categories)

    # Tick marks drawn outside the axes.
    chart.x_axis.majorTickMark = 'out'
    chart.y_axis.majorTickMark = 'out'

    # 7. Anchor the chart at J5.
    ws.add_chart(chart, "J5")

    # 8. BUG FIX: previously saved to 'Android_status.xlsx' in the current
    # working directory instead of the workbook that was loaded, so the
    # chart never reached the result file. Save back to the loaded path.
    wb.save(report_path)

def draw_c(sheet_name,start_col=2,table_name="Table Name"):
    """Rewrite one sheet of result/QNX_status.xlsx and embed a line chart.

    The sheet is read with pandas, written back cell by cell, and a
    LineChart over columns ``start_col``..last is anchored at G7.

    Args:
        sheet_name: worksheet to chart.
        start_col: first data column of the chart (1-based).
        table_name: chart title.
    """
    workbook_path = os.path.join(BASE_PATH_RESULT, "QNX_status.xlsx")

    # Read the sheet's current contents.
    sheet_df = pd.read_excel(workbook_path, sheet_name=sheet_name)

    wb = load_workbook(workbook_path)
    ws = wb[sheet_name]

    # Write every value back, starting at row 2 (row 1 keeps the headers).
    for col_idx, col_name in enumerate(sheet_df.columns, start=1):
        for row_idx, cell_value in enumerate(sheet_df[col_name], start=2):
            ws.cell(row=row_idx, column=col_idx, value=cell_value)

    # Build the line chart.
    chart = LineChart()
    chart.title = table_name
    chart.x_axis.title = "Time"
    chart.y_axis.title = "Value"

    # Series data: start_col..last column, header row included.
    total_rows = len(sheet_df) + 1
    data = Reference(ws, min_col=start_col, min_row=1, max_col=len(sheet_df.columns), max_row=total_rows)
    chart.add_data(data, titles_from_data=True)

    # X-axis labels come from the first column.
    categories = Reference(ws, min_col=1, min_row=2, max_row=total_rows)
    chart.set_categories(categories)

    # Anchor the chart at G7.
    ws.add_chart(chart, "G7")

    # Overwrite the original workbook in place.
    wb.save(workbook_path)

def draw_QNX_Status(sheet_name):
    """Normalize a QNX_status.xlsx sheet to bare numbers and add a line chart.

    Unit suffixes (k, M, %, ...) are stripped from every cell, the cleaned
    values are written back, and a LineChart over all data columns is
    anchored at G7.

    Args:
        sheet_name: name of the worksheet to process.
    """
    # Consistency fix: resolve the workbook under the result directory like
    # the other QNX helpers (draw_c, process_excel_data) do, instead of the
    # current working directory (which differs when running frozen).
    file_path = os.path.join(BASE_PATH_RESULT, "QNX_status.xlsx")

    # Read the target sheet.
    df = pd.read_excel(file_path, sheet_name=sheet_name)

    def extract_number(value):
        """Return the numeric part of a cell, dropping any unit suffix."""
        if isinstance(value, str):
            # Pull the leading numeric portion; units (k, M, %, ...) are ignored.
            match = re.search(r'([0-9.]+)', value)
            if match:
                return float(match.group(1))
        elif isinstance(value, (int, float)):
            return value
        return None  # non-numeric cell

    # Apply element-wise across the whole frame.
    df = df.applymap(extract_number)

    # Open the workbook for editing.
    wb = load_workbook(file_path)
    ws = wb[sheet_name]

    # Write the cleaned values back, starting at row 2 (row 1 = headers).
    for i, column in enumerate(df.columns, start=1):
        for j, value in enumerate(df[column], start=2):
            ws.cell(row=j, column=i, value=value)

    # Build the line chart.
    chart = LineChart()
    chart.title = "Table Name"
    chart.x_axis.title = "Time"
    chart.y_axis.title = "Value"

    # Series data: columns B..last, header row included.
    data = Reference(ws, min_col=2, min_row=1, max_col=len(df.columns), max_row=len(df) + 1)
    chart.add_data(data, titles_from_data=True)

    # X-axis labels come from the first column.
    categories = Reference(ws, min_col=1, min_row=2, max_row=len(df) + 1)
    chart.set_categories(categories)

    # Anchor the chart at G7.
    ws.add_chart(chart, "G7")

    # Overwrite the workbook in place.
    wb.save(file_path)


def draw_line_chart():
    """Embed a line chart for the 'Meminfo-total' sheet of Android_status.xlsx.

    Reads the sheet, writes its values back cell by cell, then anchors a
    LineChart over all data columns at G7 and saves the workbook in place.
    """
    # Consistency fix: resolve the workbook under the result directory like
    # the other Android chart helpers (e.g. draw_line_chart2) do, instead of
    # the current working directory.
    file_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")

    sheet_name = 'Meminfo-total'  # target worksheet

    # Read the target sheet.
    df = pd.read_excel(file_path, sheet_name=sheet_name)

    # Open the workbook for editing.
    wb = load_workbook(file_path)
    ws = wb[sheet_name]

    # Write every value back, starting at row 2 (row 1 keeps the headers).
    for i, column in enumerate(df.columns, start=1):
        for j, value in enumerate(df[column], start=2):
            ws.cell(row=j, column=i, value=value)

    # Build the line chart.
    chart = LineChart()
    chart.title = "Table Name"
    chart.x_axis.title = "Time"
    chart.y_axis.title = "Value"

    # Series data: columns B..last, header row included.
    data = Reference(ws, min_col=2, min_row=1, max_col=len(df.columns), max_row=len(df) + 1)
    chart.add_data(data, titles_from_data=True)

    # X-axis labels come from the first column.
    categories = Reference(ws, min_col=1, min_row=2, max_row=len(df) + 1)
    chart.set_categories(categories)

    # Anchor the chart at G7.
    ws.add_chart(chart, "G7")

    # Overwrite the workbook in place.
    wb.save(file_path)

def draw_line_chart2(sheet_name,start_col=2,table_name="Table Name",start_row=1):
    """Embed line chart(s) for one sheet of result/Android_status.xlsx.

    The sheet is read with pandas, written back cell by cell, and a
    LineChart over columns ``start_col``..min(255, ncols) is anchored at
    G7.  Sheets with more than 254 columns get additional charts for the
    overflow columns, staggered downward from G27.

    Args:
        sheet_name: worksheet to chart.
        start_col: first data column of the primary chart (1-based).
        table_name: chart title (reused for the overflow charts).
        start_row: first data row of the primary chart.
            NOTE(review): the overflow section below rebinds start_row to 1,
            so this parameter only affects the primary chart — confirm intent.
    """
    # Workbook under the result directory.
    file_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    # sheet_name = 'GPU'  # example sheet name

    # Read the target sheet.
    df = pd.read_excel(file_path, sheet_name=sheet_name)

    # Open the workbook for editing.
    wb = load_workbook(file_path)
    ws = wb[sheet_name]

    # Write every value back, starting at row 2 (row 1 keeps the headers).
    for i, column in enumerate(df.columns, start=1):
        for j, value in enumerate(df[column], start=2):
            ws.cell(row=j, column=i, value=value)

    # Primary line chart.
    chart = LineChart()
    chart.title =table_name
    chart.x_axis.title = "Time"
    chart.y_axis.title = "Value"

    # Data range, capped at 255 columns (Excel chart series limit region).
    data = Reference(ws, min_col=start_col, min_row=start_row, max_col = 255 if len(df.columns) > 255 else len(df.columns), max_row=len(df) + 1)
    chart.add_data(data, titles_from_data=True)

    # X-axis labels come from the first column.
    categories = Reference(ws, min_col=1, min_row=start_row+1, max_row=len(df) + 1)
    chart.set_categories(categories)

    # Anchor the primary chart at G7.
    ws.add_chart(chart, "G7")

    pass

    # Overflow handling: one extra chart per additional block of 254 columns.
    num_columns = len(df.columns)

    # Number of extra charts needed.
    num_charts = math.floor(num_columns / 254)

    start_row = 1  # NOTE(review): overwrites the start_row parameter (see docstring)
    # table_name = "Chart Title"  # customize the chart title

    for i in range(num_charts):
        # Column window for this chart.
        # NOTE(review): the first window begins at column 255, which is also
        # the last column of the primary chart — possible off-by-one overlap;
        # confirm the intended chunk boundaries.
        start_col = 255 + (i * 254)
        end_col = min(255 + ((i + 1) * 254), num_columns + 1)

        # Only chart genuinely overflowing column ranges.
        if num_columns > 254 * i:
            chart = LineChart()
            chart.title = table_name
            chart.x_axis.title = "Time"
            chart.y_axis.title = "Value"

            # Data range for this chunk of columns.
            data = Reference(ws, min_col=start_col, min_row=1, max_col=end_col, max_row=len(df) + 1)
            chart.add_data(data, titles_from_data=True)

            # X-axis labels come from the first column.
            categories = Reference(ws, min_col=1, min_row=start_row + 1, max_row=len(df) + 1)
            chart.set_categories(categories)

            # Stagger the charts downward to avoid overlap.
            ws.add_chart(chart, f"G{27 + i * 20}")



    # Overwrite the workbook in place.
    wb.save(file_path)
    pass

def draw_overlay_bar_chart(sheet_name):
    """Embed a stacked FPS-overview column chart into Android_status.xlsx.

    Reads the given sheet, writes its values back, builds a column chart
    whose data starts at column 12 (L), colors the first four series, then
    inserts a stacked copy of it anchored at G7.

    Args:
        sheet_name: worksheet holding the FPS data.
    """
    # Workbook under the result directory.
    file_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    sheet_name = sheet_name  # expected to be the FPS sheet

    # Read the target sheet.
    df = pd.read_excel(file_path, sheet_name=sheet_name)

    # Open the workbook for editing.
    wb = load_workbook(file_path)
    ws = wb[sheet_name]

    # Write every value back, starting at row 2 (row 1 keeps the headers).
    for i, column in enumerate(df.columns, start=1):
        for j, value in enumerate(df[column], start=2):
            ws.cell(row=j, column=i, value=value)

    chart1 = BarChart()

    chart1.type = "col"
    chart1.style = 10
    chart1.title = "FPS总览（K）"
    chart1.y_axis.title = 'Value'
    chart1.x_axis.title = 'Time'

    # NOTE(review): data starts at column 12 (L) and stops one column short
    # of the sheet's last column — confirm against the FPS sheet layout.
    max_col = max(len(df.columns) - 1, 1)

    # Ensure max_row is at least 1.
    max_row = max(len(df) + 1, 1)

    # Data and category references.
    data = Reference(ws, min_col=12, min_row=1, max_col=max_col, max_row=max_row)
    cats = Reference(ws, min_col=1, min_row=2, max_row=max_row)
    chart1.add_data(data, titles_from_data=True)
    chart1.set_categories(cats)
    chart1.shape = 4


    # Alternative per-series palette kept for reference:
    # chart1.series[0].graphicalProperties.line.solidFill = "0000FF"  # blue
    # chart1.series[1].graphicalProperties.line.solidFill = "FF0000"  # red
    # chart1.series[2].graphicalProperties.line.solidFill = "0000FF"  # blue
    # chart1.series[3].graphicalProperties.line.solidFill = "FF9900"  # orange
    # chart1.series[4].graphicalProperties.line.solidFill = "FF00FF"  # magenta
    # chart1.series[5].graphicalProperties.line.solidFill = "00FFFF"  # cyan
    # chart1.series[6].graphicalProperties.line.solidFill = "FFD700"  # gold
    # chart1.series[7].graphicalProperties.line.solidFill = "00FF00"  # green
    # chart1.series[8].graphicalProperties.line.solidFill = "0000FF"  # blue
    # chart1.series[9].graphicalProperties.line.solidFill = "0000FF"  # blue

    # NOTE(review): these indexed accesses assume at least four data series;
    # fewer data columns would raise IndexError — confirm the sheet always
    # provides them.
    chart1.series[0].graphicalProperties.line.solidFill = "00FF00"  # green
    chart1.series[1].graphicalProperties.line.solidFill = "0000FF"  # blue
    chart1.series[2].graphicalProperties.line.solidFill = "FF9900"  # orange
    chart1.series[3].graphicalProperties.line.solidFill = "FF0000"  # red


    # Clone the chart and turn the copy into the stacked variant actually inserted.
    chart3 = deepcopy(chart1)
    chart3.type = "col"
    chart3.style = 2
    chart3.grouping = "stacked"
    chart3.overlap = 100
    chart3.title = 'FPS总览'



    ws.add_chart(chart3, "G7")

    wb.save(file_path)
    pass

def draw_overlay_bar_chart2():
    """Embed a stacked column chart for the 'FPS' sheet of Android_status.xlsx.

    Reads the sheet with pandas, writes the values back cell by cell, then
    anchors a stacked BarChart over columns C..last at G7 and saves the
    workbook in place.
    """
    workbook_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    target_sheet = 'FPS'

    # Read the sheet's current contents.
    sheet_df = pd.read_excel(workbook_path, sheet_name=target_sheet)

    wb = load_workbook(workbook_path)
    ws = wb[target_sheet]

    # Write every value back, starting at row 2 (row 1 keeps the headers).
    for col_idx, col_name in enumerate(sheet_df.columns, start=1):
        for row_idx, cell_value in enumerate(sheet_df[col_name], start=2):
            ws.cell(row=row_idx, column=col_idx, value=cell_value)

    # Stacked column chart.
    chart = BarChart()
    chart.type = "col"
    chart.stacked = True
    chart.title = "Table Name"
    chart.x_axis.title = "Time"
    chart.y_axis.title = "Value"

    # Series data: columns C..last, header row included.
    total_rows = len(sheet_df) + 1
    data = Reference(ws, min_col=3, min_row=1, max_col=len(sheet_df.columns), max_row=total_rows)
    chart.add_data(data, titles_from_data=True)

    # X-axis labels come from the first column.
    categories = Reference(ws, min_col=1, min_row=2, max_row=total_rows)
    chart.set_categories(categories)

    # Anchor the chart at G7.
    ws.add_chart(chart, "G7")

    # Overwrite the workbook in place.
    wb.save(workbook_path)

# def handling_fps(display_frame_time_list):
#     x_percentage, failed_list_count, bigjank_list_count, jank_list_count, total_count = add_display_frame_time_list(
#         display_frame_time_list)
#     draw_overlay_bar_chart()
#     AutoFilter()
#     add_FPS_info(x_percentage, failed_list_count, bigjank_list_count, jank_list_count, total_count)


def add_display_frame_time_list(display_frame_time_list,sheet_name):
    """Compute FPS quality statistics from a sheet of Android_status.xlsx.

    The initial failed/bigjank/jank flag lists are derived from
    ``display_frame_time_list`` (64/48/32 ms thresholds, values presumably
    in nanoseconds — TODO confirm), but each list is REPLACED by the
    sheet's 'failed total' / 'bigjank total' / 'jank total' column values
    whenever those columns exist.

    Args:
        display_frame_time_list: per-frame display times.
        sheet_name: worksheet of result/Android_status.xlsx to read.

    Returns:
        tuple: (x_percentage, failed_list, bigjank_list, jank_list,
        toal_count, normal_list_count) — x_percentage is the share of
        normal frames in percent; toal_count (sic, typo preserved) is the
        16.6 ms-slot-weighted frame total.
    """
    # 1. Initial per-frame flags derived from the raw frame times.
    failed_list = [
        "0" if num > 64000000 else ""
        for num in display_frame_time_list
    ]
    bigjank_list = [
        "0" if 64000000 > num > 48000000 else ""
        for num in display_frame_time_list
    ]
    jank_list = [
        "0" if 48000000 > num > 32000000 else ""
        for num in display_frame_time_list
    ]


    # 2. Read the target worksheet.
    file_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    # sheet_name = 'FPS'  # example target worksheet
    df = pd.read_excel(file_path, sheet_name=sheet_name)
    #
    # # 3. Earlier variant: write the flag columns back to the sheet.
    # df['Jank'] = jank_list
    # df['Bigjank'] = bigjank_list
    # df['Failed'] = failed_list



    # # 4. Earlier variant: overwrite the sheet via ExcelWriter.
    # with pd.ExcelWriter(file_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
    #     df.to_excel(writer, sheet_name=sheet_name, index=False)

    # jank_list_count = sum(1 for num in display_frame_time_list if 48000000 > num > 32000000)
    # bigjank_list_count = sum(1 for num in display_frame_time_list if 64000000 > num > 48000000)
    # failed_list_count = sum(1 for num in display_frame_time_list if num > 64000000)
    # Pull counts / value lists from the sheet's aggregate columns when present.
    # NOTE(review): when a '* total' column is missing, the corresponding
    # flag list from step 1 is used in the len() computations below.
    normal_list_count=0
    if 'normal total' in df.columns:
        normal_list_count = (df["normal total"] >= 0).sum()
        normal_list = df[df["normal total"] > 0]["normal total"].tolist()
    if 'jank total' in df.columns:
        jank_list_count = (df["jank total"] > 0).sum()
        jank_list = df[df["jank total"] > 0]["jank total"].tolist()
    if 'bigjank total' in df.columns:
        bigjank_list_count = (df["bigjank total"] > 0).sum()
        bigjank_list = df[df["bigjank total"] > 0]["bigjank total"].tolist()
    if 'failed total' in df.columns:
        failed_list_count = (df["failed total"] > 0).sum()
        failed_list = df[df["failed total"] > 0]["failed total"].tolist()

    # Weighted total: normal frames count once; abnormal frames count by the
    # number of 16.6 ms slots they span (see total_multiples).
    toal_count=((normal_list_count - len(jank_list) - len(bigjank_list) - len(failed_list)) + total_multiples(jank_list) + total_multiples(bigjank_list) + total_multiples(failed_list))
    if normal_list_count != 0:
        # x = (normal_list_count - jank_list_count * 2 - bigjank_list_count * 3 - failed_list_count * 4) / normal_list_count
        x=(normal_list_count - len(jank_list) - len(bigjank_list) - len(failed_list))/toal_count

    else:
        # No frames at all: fall back to 0.
        x = 0

    x_percentage = round(x * 100, 2)




    return x_percentage,failed_list,bigjank_list,jank_list,toal_count,normal_list_count

    pass

def total_multiples(list):
    """Sum the number of 16.6 ms (16,600,000 ns) frame slots each entry spans.

    Each non-empty value is divided by 16,600,000 and rounded up; empty
    strings contribute 0.  (The previous comment/variable said 16000000,
    but the divisor is and was 16600000 — one 60 fps-ish frame slot.)

    NOTE: the parameter name shadows the builtin ``list`` but is kept for
    backward compatibility with existing callers.

    Args:
        list: iterable of frame times (numbers or numeric strings; '' allowed).

    Returns:
        int: total count of 16.6 ms slots across all entries.
    """
    slot_counts = [math.ceil(float(x) / 16600000) if x != '' else 0 for x in list]
    return sum(slot_counts)

def add_display_frame_time_list2(display_frame_time_list):
    """Append a per-frame severity column to the FPS sheet of Android_status.xlsx.

    Each frame time is classified as 'failed' (>64 ms), 'Bigjank' (>48 ms),
    'jank' (>32 ms) or '' — thresholds suggest nanosecond inputs (TODO
    confirm) — and the labels are written as a new 'NewColumn' column.

    Args:
        display_frame_time_list: per-frame display times; must have the
            same length as the FPS sheet's row count.
    """
    # Classify every frame by its display time.
    modified_list = [
        "failed" if num > 64000000 else
        "Bigjank" if num > 48000000 else
        "jank" if num > 32000000 else
        ""
        for num in display_frame_time_list
    ]

    # 1. Read the target worksheet.
    file_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    sheet_name = 'FPS'  # target worksheet
    df = pd.read_excel(file_path, sheet_name=sheet_name)

    # 2. Attach the classification as a new column.
    new_column_data =modified_list # per-frame severity labels
    df['NewColumn'] = new_column_data  # append the column

    # 3. Rewrite the sheet in place (if_sheet_exists='replace' overwrites it).
    with pd.ExcelWriter(file_path, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
        # Persist the modified frame back to the same sheet.
        df.to_excel(writer, sheet_name=sheet_name, index=False)

    print("新列已成功添加到指定工作表。")
    pass

def process_excel_data(sheet_name):
    """Normalize unit-suffixed cell values in a sheet of QNX_status.xlsx.

    Scans the data area (skipping the header row and the first column) for
    values of the form "<number><G|M|K|k>" and rewrites them as plain numbers
    in kilobyte units: G -> *1024*1024, M -> *1024, K/k -> number unchanged.

    Args:
        sheet_name: name of the worksheet to process.
    """
    path = os.path.join(BASE_PATH_RESULT, "QNX_status.xlsx")
    workbook = load_workbook(filename=path)

    if sheet_name in workbook.sheetnames:
        sheet = workbook[sheet_name]
    else:
        # BUG FIX: message previously hard-coded 'CPU'; report the sheet
        # actually requested (matches process_excel_data2).
        print(f"没有找到名为{sheet_name}的工作表")
        sheet = None

    if sheet:
        max_row = sheet.max_row
        max_column = sheet.max_column

        # Walk the data region, skipping the header row and label column.
        for row in range(2, max_row + 1):
            for col in range(2, max_column + 1):
                cell = sheet.cell(row=row, column=col)
                cell_value = str(cell.value)

                # number + unit suffix, e.g. "1.5G", "200M", "64k", "64K"
                if re.match(r"^\d+(\.\d+)?(G|M|K|k)$", cell_value):
                    number = float(cell_value[:-1])
                    suffix = cell_value[-1]

                    # BUG FIX: the regex accepts an uppercase "K" but the
                    # original chain handled only "k", leaving new_value
                    # unbound (or stale from a prior cell) for "K" values.
                    if suffix == "G":
                        new_value = number * 1024 * 1024
                    elif suffix == "M":
                        new_value = number * 1024
                    else:  # "K" or "k" -- already in KB
                        new_value = number

                    cell.value = new_value

    workbook.save(filename=path)  # persist the normalized values

def process_excel_data2(sheet_name):
    """Normalize unit-suffixed cell values in a sheet of Android_status.xlsx.

    Scans the data area (skipping the header row and the first column) for
    values of the form "<number><G|M|K|k>" and rewrites them as plain numbers
    in kilobyte units: G -> *1024*1024, M -> *1024, K/k -> number unchanged.

    Args:
        sheet_name: name of the worksheet to process.
    """
    path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    workbook = load_workbook(filename=path)

    if sheet_name in workbook.sheetnames:
        sheet = workbook[sheet_name]
    else:
        print(f"没有找到名为{sheet_name}的工作表")
        sheet = None

    if sheet:
        max_row = sheet.max_row
        max_column = sheet.max_column

        # Walk the data region, skipping the header row and label column.
        for row in range(2, max_row + 1):
            for col in range(2, max_column + 1):
                cell = sheet.cell(row=row, column=col)
                cell_value = str(cell.value)

                # number + unit suffix, e.g. "1.5G", "200M", "64k", "64K"
                if re.match(r"^\d+(\.\d+)?(G|M|K|k)$", cell_value):
                    number = float(cell_value[:-1])
                    suffix = cell_value[-1]

                    # BUG FIX: the regex accepts an uppercase "K" but the
                    # original chain handled only "k", leaving new_value
                    # unbound (or stale from a prior cell) for "K" values.
                    if suffix == "G":
                        new_value = number * 1024 * 1024
                    elif suffix == "M":
                        new_value = number * 1024
                    else:  # "K" or "k" -- already in KB
                        new_value = number

                    cell.value = new_value

    workbook.save(filename=path)  # persist the normalized values


def AutoFilter(sheet_name):
    """Enable an Excel AutoFilter on the header row of a sheet of
    Android_status.xlsx, driving Excel headlessly through xlwings.

    Args:
        sheet_name: worksheet to apply the filter on.
    """
    workbook_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    app = xw.App(visible=False)  # run Excel hidden
    try:
        wb = app.books.open(workbook_path)
        try:
            sheet = wb.sheets[sheet_name]

            # Apply AutoFilter on field 10 of the header (first) row.
            # NOTE(review): no criteria are passed, so this only toggles the
            # filter dropdown on column 10 -- confirm this matches the
            # original intent ("column 2 != Y" per the old comment).
            used_range = sheet.api.UsedRange
            first_row = used_range.Rows(1)
            first_row.AutoFilter(10)

            wb.save()
        finally:
            wb.close()
    finally:
        # BUG FIX: always quit the hidden Excel instance, even when an
        # exception occurs, so failed runs do not leak EXCEL processes.
        app.quit()
def draw_test2():
    """Demo: build a small data sheet and save a stacked bar chart to bar.xlsx."""
    workbook = Workbook(write_only=True)
    worksheet = workbook.create_sheet()

    sample_rows = (
        ('Number', 'Batch 1', 'Batch 2', 'Batch 3'),
        (2, 10, 30),
        (39, 40, 60, 2),
        (4, 50, 70),
        (58, 20, 10),
        (66, 10, 40, 0),
        (7, 50, 30),
        (8, 10, 10, 17),
    )
    for sample_row in sample_rows:
        worksheet.append(sample_row)

    # Base grouped-column chart; it is only used as a template below.
    bar_chart = BarChart()
    bar_chart.type = "col"
    bar_chart.style = 10
    bar_chart.title = "Bar Chart"
    bar_chart.y_axis.title = 'Test number'
    bar_chart.x_axis.title = 'Sample length (mm)'

    series_data = Reference(worksheet, min_col=2, min_row=1, max_row=8, max_col=4)
    categories = Reference(worksheet, min_col=1, min_row=2, max_row=8)
    bar_chart.add_data(series_data, titles_from_data=True)
    bar_chart.set_categories(categories)
    bar_chart.shape = 4

    from openpyxl.chart.label import DataLabelList
    # Show value labels only on the third series.
    bar_chart.series[2].dLbls = DataLabelList()
    bar_chart.series[2].dLbls.showVal = True

    from copy import deepcopy
    # Derive the stacked variant from the configured base chart.
    stacked_chart = deepcopy(bar_chart)
    stacked_chart.type = "col"
    stacked_chart.style = 10
    stacked_chart.grouping = "stacked"
    stacked_chart.overlap = 100
    stacked_chart.title = 'Stacked Chart'

    # Only the stacked variant is placed on the sheet.
    worksheet.add_chart(stacked_chart, "G7")

    workbook.save("bar.xlsx")

def add_FPS_info(sheet_name,fps_sheet_name,x_percentage,failed_list_count,bigjank_list_count,jank_list_count,total_count,normal_list_count):
    """Write the FPS summary block (cells R2..R12) onto a sheet of
    Android_status.xlsx and grade the result against criteria a/b/c.

    Args:
        sheet_name: worksheet to write the summary into.
        fps_sheet_name: package name shown in the summary.
        x_percentage: smoothness ratio in percent.
        failed_list_count / bigjank_list_count / jank_list_count: despite the
            "_count" suffix these are the LISTS of failed / bigjank / jank
            frame durations (ns); counts are taken via len() below.
        total_count: converted total frame count.
        normal_list_count: raw total frame count.
    """
    file_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    wb = load_workbook(file_path)

    if sheet_name in wb.sheetnames:
        ws = wb[sheet_name]
    else:
        # BUG FIX: return instead of exit() -- one missing sheet must not
        # terminate the entire report-generation process.
        print(f"没有找到 {sheet_name} 工作表")
        wb.close()
        return

    # Grade against the criteria written to R10-R12 below.
    target = '不满足abc任意条件'
    # NOTE(review): this ">4" threshold does not obviously match criterion a
    # ("P>=4", i.e. max frame time >= 64 ms); kept as-is pending confirmation.
    if len(jank_list_count) + len(bigjank_list_count) + len(failed_list_count) > 4:
        target = '不达标'
    # BUG FIX: inclusive bounds so x == 95 passes and x == 85 passes
    # conditionally, matching "x>=95%" (c) and "85%<=x<95%" (b) below.
    elif x_percentage >= 95:
        target = '通过'
    elif 95 > x_percentage >= 85:
        target = '有条件通过'

    ws['R2'] = f'包名：{fps_sheet_name}'
    ws['R3'] = f'总帧数：{normal_list_count}'
    ws['T3'] = f'换算后总帧数：{total_count}'
    ws['R4'] = f'jank数：{len(jank_list_count)}'
    ws['T4'] = f'换算后jank数：{total_multiples(jank_list_count)}'
    ws['R5'] = f'bigjank数：{len(bigjank_list_count)}'
    ws['T5'] = f'换算后bigjank数：{total_multiples(bigjank_list_count)}'
    ws['R6'] = f'failed数：{len(failed_list_count)}'
    ws['T6'] = f'换算后failed数：{total_multiples(failed_list_count)}'
    # Worst-frame severity: failed frames report their actual vsync multiple,
    # otherwise bigjank counts as 3 and jank as 2 vsync periods.
    ws['R7'] = f'最大卡顿帧：{next((v for c, v in [(len(failed_list_count), max(failed_list_count) / 16000000 if failed_list_count else 0), (len(bigjank_list_count), 3), (len(jank_list_count), 2)] if c > 0), 0)}'
    ws['R8'] = f'流畅率：{x_percentage}%'
    ws['R9'] = f'当前FPS的Target为:{target}'
    ws['R10'] = '    a.P≥4不达标(最大帧耗时大于等于64ms)'
    ws['R11'] = '    b.85%≤x<95%有条件通过(B级)'
    ws['R12'] = '    c.x≥95% 通过(A级)'

    wb.save(file_path)
    wb.close()

def gpu_draw_polyline(gpu_df, title, sheet_name,y_axis_rotation=False):
    """Plot GPU usage over time and save it as <sheet_name>.png under the
    result directory.

    Args:
        gpu_df: DataFrame with 'Time' (e.g. "Jan  1 00:04:25.123") and
            'Percentage' columns.  NOTE: both columns are converted in place,
            so the caller's DataFrame is mutated.
        title: chart title.  BUG FIX: previously accepted but ignored; an
            empty/falsy title falls back to the old hard-coded text.
        sheet_name: base name of the output PNG file.
        y_axis_rotation: unused; kept only for interface compatibility.

    Returns:
        str: path of the saved PNG.
    """
    # Parse timestamps and coerce the usage column to numeric.
    gpu_df['Time'] = pd.to_datetime(gpu_df['Time'], format='%b %d %H:%M:%S.%f')
    gpu_df['Percentage'] = pd.to_numeric(gpu_df['Percentage'])

    plt.figure(figsize=(10, 6))
    plt.plot(gpu_df['Time'], gpu_df['Percentage'], marker='o', color='b', label='GPU Percentage')

    # Use the caller-supplied title; keep the legacy text as fallback.
    plt.title(title if title else 'GPU Usage Over Time')
    plt.xlabel('Time')
    plt.ylabel('Percentage (%)')
    plt.legend()

    # Rotate x tick labels so dense timestamps remain readable.
    plt.xticks(rotation=90)
    plt.tight_layout()

    save_path = os.path.join(BASE_PATH_RESULT, f"{sheet_name}.png")
    plt.savefig(save_path, bbox_inches='tight')
    plt.close()  # release figure resources
    return save_path


def insert_image_to_excel(report_path, img_path, sheet_name):
    """Insert an image one column to the right of the sheet's data,
    anchored at row 1, then save the workbook in place (overwrites it)."""
    workbook = openpyxl.load_workbook(report_path)
    target_sheet = workbook[sheet_name]

    # The width of the existing data decides where the picture goes.
    data_frame = pd.read_excel(report_path, sheet_name=sheet_name)
    column_count = len(data_frame.columns)

    # Anchor cell: first row, one column past the last data column
    # (openpyxl column indices are 1-based).
    anchor_cell = openpyxl.utils.get_column_letter(column_count + 1) + str(1)

    picture = Image(img_path)
    picture.anchor = anchor_cell
    target_sheet.add_image(picture)

    workbook.save(report_path)
    workbook.close()


def processing_top(top_file_path):
    """Parse a sysinfo "top" capture log into DataFrames.

    The log is a series of dump sections delimited by "Start dump"/"End dump"
    timestamp markers and "Start top"/"End  top" markers; the rows after each
    "PID" header line are the per-process samples.

    Args:
        top_file_path: path of the top log file.

    Returns:
        tuple: (top_cpu_df, top_mem_df, top_Summary_df) -- the merged %CPU and
        RES tables (built via merge_top_df_with_cpu) and a one-row summary
        holding the first start and last end timestamps.
    """
    # Section delimiters exactly as they appear in the log (the double space
    # in "End  top" is literal).
    start_top_keyword = "Start top"
    end_top_keyword = "End  top"
    start_pid_keyword = "PID"
    start_time_keyword = "Start dump"
    end_time_keyword = "End dump"

    with open(top_file_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()

        top_extracted_paragraphs = []  # one list of data lines per top section
        top_current_paragraph = []

        start_times = []
        end_times = []

        top_recording = False  # inside a Start top / End top section
        pid_recording = False  # past the "PID ..." header, i.e. in data rows

        top_df_header_list = ["PID", "USER", "PR", "NI", "VIRT", "RES", "SHR", "S", "%CPU", "%MEM", "TIME+", "ARGS"]

        for line in lines:
            # Timestamp markers bracketing each dump.
            if start_time_keyword in line:
                start_times.append(get_time(line))
                continue  # skip remaining checks for this line

            if end_time_keyword in line:
                end_times.append(get_time(line))

            # ------------------------- record the top section -------------------------
            if start_top_keyword in line:
                top_recording = True
                continue  # skip remaining checks for this line

            # The "PID ..." header marks the start of the data rows.
            if start_pid_keyword in line:
                pid_recording = True
                continue  # skip remaining checks for this line

            # End of section: flush the collected rows.
            if top_recording and end_top_keyword in line:
                top_extracted_paragraphs.append(top_current_paragraph)
                top_recording = False
                pid_recording = False
                top_current_paragraph = []  # reset the current section
                continue  # the section was saved; skip remaining checks

            # Collect only data rows (after the PID header) inside a section.
            if top_recording and pid_recording:
                top_current_paragraph.append(line)

        top_dfs = []
        for i, paragraph in enumerate(top_extracted_paragraphs):
            paragraph_new = []
            for line in paragraph:
                # Split the raw line into its columns.
                cleaned_list = top_line_match(line)
                if len(cleaned_list) > 12:
                    # More fields than the header expects -- surface it.
                    print(cleaned_list)
                paragraph_new.append(cleaned_list)

            # One DataFrame per dump, stamped with that dump's start time.
            top_df = pd.DataFrame(paragraph_new, columns=top_df_header_list)
            top_df.insert(0, 'Time', start_times[i])
            top_dfs.append(top_df)

        top_cpu_df = merge_top_df_with_cpu(top_dfs, type="%CPU")
        top_mem_df = merge_top_df_with_cpu(top_dfs, type="RES")

        # NOTE(review): assumes at least one dump was captured;
        # start_times[0] / end_times[-1] raise IndexError on an empty log.
        top_Summary_df = pd.DataFrame(columns=["CPU/RES测试开始时间", "CPU/RES测试结束时间"])
        top_Summary_df.loc[0] = [start_times[0], end_times[-1]]
        # BUG FIX: removed the redundant f.close() -- the with-statement
        # already closes the file.

    return top_cpu_df, top_mem_df, top_Summary_df


def processing_meminfo(meminfo_file_path):
    """Parse a sysinfo meminfo capture log into a set of DataFrames.

    The log interleaves several section types, each bracketed by its own
    start/end keyword pair: dumpsys-meminfo "Total RSS by process" tables,
    /proc/meminfo blocks, df disk-usage blocks, gfxinfo fps summaries, and
    PROFILEDATA frame-timing tables (one group per "Graphics info for <pkg>"
    header line).

    Args:
        meminfo_file_path: path of the log file to parse.

    Returns:
        tuple: (dump_meminfo_df, meminfo_df, meminfo_Summary_df, fps_info_df,
        df_need_info_df, profiledata_info_df_dict, display_frame_time_list).
        profiledata_info_df_dict maps each "Graphics info for" header line to
        its per-frame timing DataFrame; display_frame_time_list collects every
        frame's (frame_completed - intended_vsync) duration in nanoseconds.
    """
    # Section delimiter keywords exactly as they appear in the log.
    start_time_keyword = "#Start dump"
    end_time_keyword = "#End dump"
    start_meminfo_keyword = "Start meminfo"
    end_meminfo_keyword = "End meminfo"

    start_dump_meminfo_keyword = "Start dump meminfo"
    end_dump_meminfo_keyword = "End dump meminfo"
    start_rss_keyword = "Total RSS by process"
    end_rss_keyword = "Total RSS by OOM adjustment"


    start_df_need_keywork= "Start df"
    end_df_need_keywork= "End df"

    start_fps_info_keyword = "Start fpsinfo"
    end_fps_info_keyword = "End fpsinfo"


    profiledata_keywork="PROFILEDATA"

    start_fps_keyword = "Stats since"
    end_fps_keyword = "50th percentile"

    # Mount points of interest for the df section.
    # NOTE(review): both lists below are currently unused in this function.
    df_name_need_list = ["/", "/dev", "/mnt", "/mnt/installer/0/emulated", "/mnt/installer/0/emulated/0/Android/data",
                         "/mnt/vendor/persist", "/vendor", "/apex", "/linkerconfig",
                         "/cert_block", "/resources/mapdata", "/resources/mapadas", "/log/offlinelogs", "/data_mirror",
                         "/fota", "/share"]
    df_used_need_list = []


    df_need_recording = False

    profiledata_recording = False
    profiledata_index=0  # number of PROFILEDATA sections seen so far

    fps_info_recording = False
    fps_recording = False

    with open(meminfo_file_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        # Extracted text sections, one list of lines per section.
        dump_meminfo_extracted_paragraphs = []
        meminfo_extracted_paragraphs = []

        # Lines of the section currently being collected.
        dump_meminfo_current_paragraph = []
        meminfo_current_paragraph = []

        fps_info_extracted_paragraphs = []

        # Lines of the fps section currently being collected.
        fps_info_current_paragraph = []
        df_need_extracted_paragraphs = []
        df_need_current_paragraph = []
        profiledata_extracted_paragraphs = []
        profiledata_current_paragraph = []
        # intended_vsync -> (raw line, dump start time); ordered, deduplicated.
        profiledata_input_map=OrderedDict()
        start_times = []
        end_times = []

        dump_meminfo_recording = False
        dump_meminfo_process_recording = False
        meminfo_recording = False

        # When True, keep every PROFILEDATA row regardless of its flags value.
        fps_filter = False

        current_group_map={}
        # "Graphics info for ..." header line -> list of per-section frame maps.
        project_name_dict={}
        current_project_name=""

        for line in lines:
            # Dump start timestamp.
            if start_time_keyword in line:
                start_times.append(get_time(line))
                continue  # skip the remaining checks for this line

            if end_time_keyword in line:
                end_times.append(get_time(line))

            # ------------------------- record dump meminfo (RSS table) -------------------------
            # Start of the "Total RSS by process" table.
            if start_rss_keyword in line:
                dump_meminfo_recording = True
                continue  # skip the remaining checks for this line



            # End of the RSS table: flush the collected section.
            if dump_meminfo_recording and end_rss_keyword in line:
                dump_meminfo_extracted_paragraphs.append(dump_meminfo_current_paragraph)
                dump_meminfo_recording = False
                dump_meminfo_current_paragraph = []  # reset the current section
                continue  # the section was saved; skip the remaining checks

            # While recording, collect the line.
            if dump_meminfo_recording:
                dump_meminfo_current_paragraph.append(line)


            # # Start recording?
            # if "Start dump meminfo" in line:
            #     dump_meminfo_recording = True
            #     continue  # skip the remaining checks for this line
            #
            # if "Total RSS by process" in line:
            #     dump_meminfo_process_recording = True
            #     continue  # skip the remaining checks for this line
            #
            #
            #
            # # Stop recording?
            # if dump_meminfo_recording and end_rss_keyword in line:
            #     dump_meminfo_extracted_paragraphs.append(dump_meminfo_current_paragraph)
            #     dump_meminfo_recording = False
            #     dump_meminfo_current_paragraph = []  # reset the current section
            #     continue  # the section was saved; skip the remaining checks
            #
            # # While recording, collect the line.
            # if dump_meminfo_process_recording:
            #     dump_meminfo_current_paragraph.append(line)

            # ------------------------- record meminfo -------------------------
            # Start of a /proc/meminfo block.
            if start_meminfo_keyword in line:
                meminfo_recording = True
                continue  # skip the remaining checks for this line
            # End of the block: flush the collected section.
            if meminfo_recording and end_meminfo_keyword in line:
                meminfo_extracted_paragraphs.append(meminfo_current_paragraph)
                meminfo_recording = False
                meminfo_current_paragraph = []  # reset the current section
                continue  # the section was saved; skip the remaining checks

            # While recording, collect the line.
            if meminfo_recording:
                meminfo_current_paragraph.append(line)

            # ------------------------- record fps_info -------------------------
            if start_fps_info_keyword in line:
                fps_info_recording = True
                continue  # skip the remaining checks for this line

            # Data starts at the "Stats since" line inside the fpsinfo block.
            if fps_info_recording and start_fps_keyword in line:
                fps_recording = True
                continue  # skip the remaining checks for this line

            if fps_info_recording and end_fps_keyword in line:
                fps_info_recording = False
                fps_recording = False
                fps_info_extracted_paragraphs.append(fps_info_current_paragraph)
                fps_info_current_paragraph = []  # reset the current section
                continue  # the section was saved; skip the remaining checks

            # While recording, collect the line.
            if fps_info_recording and fps_recording:
                fps_info_current_paragraph.append(line)

            # ------------------------- per-package fps_info (placeholder) -------------------------


            # ------------------------- record df -------------------------

            if start_df_need_keywork in line:
                df_need_recording = True
                continue  # skip the remaining checks for this line

            if df_need_recording and end_df_need_keywork in line:
                df_need_extracted_paragraphs.append(df_need_current_paragraph)
                df_need_recording = False
                df_need_current_paragraph = []  # reset the current section
                continue  # the section was saved; skip the remaining checks

            # While recording df, collect the line.
            if df_need_recording:
                df_need_current_paragraph.append(line)

            # ------------------------- record PROFILEDATA -------------------------

            # A "Graphics info for <pkg>" header names the package whose
            # PROFILEDATA sections follow.
            if "Graphics info for" in line:
                # fps_info_recording = True
                current_project_name = line
                # # If the key already exists, append; otherwise create it.
                # if current_project_name in project_name_dict:
                #     project_name_dict['c'].append(new_value)
                # else:
                #     project_name_dict['c'] = [new_value]
                continue  # skip the remaining checks for this line


            # Closing PROFILEDATA marker: the current section is complete.
            if profiledata_recording and profiledata_keywork in line:
                profiledata_extracted_paragraphs.append(profiledata_current_paragraph)
                profiledata_recording = False
                profiledata_current_paragraph = []  # reset the current section

                if current_project_name in project_name_dict:
                    # Key exists: append this section's frame map.
                    project_name_dict[current_project_name].append(current_group_map)
                else:
                    # Key missing: create it with this section's frame map.
                    project_name_dict[current_project_name] = [current_group_map]
                current_group_map={}
                continue  # the section was saved; skip the remaining checks

            # Opening PROFILEDATA marker.
            if profiledata_keywork in line:
                profiledata_recording = True
                profiledata_index+=1
                continue  # skip the remaining checks for this line

            # While recording PROFILEDATA, parse and keep the row.
            if profiledata_recording:
                # profiledata_current_paragraph.append(line)
                data_list = line.split(',')
                flags = data_list[0]
                intended_vsync = data_list[1]
                try:

                    if fps_filter:
                        flags = int(data_list[0])  # may raise ValueError on the header row
                        # If intended_vsync was seen before, drop the old row
                        # so the latest sample wins.
                        if intended_vsync in profiledata_input_map:
                            old_line = profiledata_input_map[intended_vsync]
                            if old_line in profiledata_current_paragraph:
                                profiledata_current_paragraph.remove(old_line)

                        if profiledata_index > 0 and profiledata_index <= len(start_times):
                            profiledata_input_map[intended_vsync] = line, start_times[profiledata_index - 1]
                            current_group_map[intended_vsync] = line, start_times[profiledata_index - 1]
                            profiledata_current_paragraph.append(line)
                        else:
                            pass
                            # print("Index out of range")

                    else:
                        # Default path: only keep rows whose FLAGS field is 0
                        # (i.e. ordinary, fully-recorded frames).
                        if flags == "0":
                            # If intended_vsync was seen before, drop the old
                            # row so the latest sample wins.
                            if intended_vsync in profiledata_input_map:
                                old_line = profiledata_input_map[intended_vsync]
                                if old_line in profiledata_current_paragraph:
                                    profiledata_current_paragraph.remove(old_line)

                            if profiledata_index > 0 and profiledata_index <= len(start_times):
                                profiledata_input_map[intended_vsync] = line, start_times[profiledata_index - 1]
                                current_group_map[intended_vsync] = line, start_times[profiledata_index - 1]
                                profiledata_current_paragraph.append(line)
                        pass
                    # if flags == "0":
                    #     if intended_vsync in profiledata_input_map:
                    #         old_line = profiledata_input_map[intended_vsync]
                    #         if old_line in profiledata_current_paragraph:
                    #             profiledata_current_paragraph.remove(old_line)
                    #
                    #     profiledata_input_map[intended_vsync] = line, start_times[profiledata_index - 1]
                    #     profiledata_current_paragraph.append(line)
                except ValueError:
                    pass
                    # Conversion failed (e.g. a non-numeric header row);
                    # silently skip the row.

            # --------------------------------------------------------------------------------------------------------------



        #------------------------- process the dump-meminfo (RSS) data -------------------------
        # pattern = r"(\d{1,3}(?:,\d{3})*K|\d+K):\s([a-zA-Z0-9.]+)\s\(pid\s(\d+)\)"
        # NOTE(review): `pattern` is assigned but no longer used -- the
        # string-index parsing below superseded the regex approach.
        pattern = r"(\d{1,3}(?:,\d{3})*K|\d+K):\s+([a-zA-Z0-9._-]+)\s\("

        df_dump_meminfo_dfs=[]
        for i, paragraph in enumerate(dump_meminfo_extracted_paragraphs):
            # One record per dump: process name -> accumulated RSS (KB units).
            df_dump_meminfo_dict = {
                "Time": start_times[i]
            }
            for line in paragraph:
                # Lines look like "  12,345K: com.foo (pid 678)"; slice out the
                # size (before the colon) and the name (between ": " and "(").
                colon_index = line.find(':')
                open_paren_index = line.find('(')

                if colon_index != -1 and open_paren_index != -1:
                    value_data=line[:colon_index].strip()
                    # Name between the colon and the parenthesis.
                    key_data= line[colon_index + 2: open_paren_index].strip()  # +2 skips the colon and the following space

                # match = re.search(pattern, line)
                # if match:
                #     key_data = match.group(2)
                #     value_data = match.group(1).replace(",", "")

                    k_number=convert_to_bytes(value_data)
                    # if k_number:
                    #     if key_data in df_dump_meminfo_dict:
                    #         number=k_number+df_dump_meminfo_dict.get(key_data)
                    #     else:
                    #         number=k_number
                    # else:
                    #     number=""
                    # Accumulate per process name; NOTE(review): a falsy
                    # k_number (0.0) yields "" rather than 0 -- confirm intended.
                    number = k_number + df_dump_meminfo_dict.get(key_data, 0) if k_number else ""

                    df_dump_meminfo_dict[key_data] = number
            # NOTE(review): reuses the df_need_info_df name from the df
            # section below; it is rebound there before use.
            df_need_info_df = pd.DataFrame([df_dump_meminfo_dict])
            df_dump_meminfo_dfs.append(df_need_info_df)



        # Merge all per-dump frames into a single DataFrame.
        if len(df_dump_meminfo_dfs) > 0:
            dump_meminfo_df = pd.concat(df_dump_meminfo_dfs, ignore_index=True)
        else:
            dump_meminfo_df=pd.DataFrame()




        # ------------------------- process the PROFILEDATA data -------------------------
        # # (disused) sort the samples by their dump timestamp first
        # time_format = '%a %b %d %H:%M:%S CST %Y'
        # def get_profiledata_time(item):
        #     time_str = item[1][1].strip()  # item[1][1] is the time string
        #     try:
        #         return datetime.strptime(time_str, time_format)
        #     except ValueError:
        #         print(f"cannot parse time: {time_str}")
        #         return datetime(1900, 1, 1)  # fallback default
        #
        # sorted_profiledata = sorted(profiledata_input_map.items(), key=lambda item: get_profiledata_time(item))

        profiledata_info_df_dict={}
        # NOTE(review): the next two bare expressions are no-ops (leftover
        # debugging statements?) -- kept as-is.
        project_name_dict
        profiledata_input_map
        display_frame_time_list = []
        for fps_name, group in project_name_dict.items():
            # NOTE(review): the inner `for key, value` loop rebinds `value`
            # (this loop's variable); harmless here but easy to trip over.
            for value in group :

                sorted_profiledata = value

                profiledata_info_dfs = []


                for key, value in OrderedDict(sorted_profiledata).items():

                    line, start_time = value  # unpack (raw line, dump start time)

                    # One record per frame.
                    profiledata_info_dict = {
                        "Time": start_time
                    }

                    # Split the comma-separated framestats row into fields.
                    data_list = line.split(',')

                    flags = data_list[0]

                    # Field indices follow the gfxinfo framestats column order
                    # -- presumably FLAGS, INTENDED_VSYNC, ... FRAME_COMPLETED;
                    # TODO confirm against the capture format.
                    oldest_input_event = int(data_list[3])
                    intended_vsync = int(data_list[1])

                    animation_start = int(data_list[6])
                    handle_input_start = int(data_list[5])

                    perform_traversals_start = int(data_list[7])

                    draw_start = int(data_list[8])

                    sync_start = int(data_list[10])
                    issue_draw_commands_start = int(data_list[11])
                    swap_buffers = int(data_list[12])
                    frame_completed = int(data_list[13])

                    # Per-stage durations (ns), derived from adjacent stamps.
                    profiledata_info_dict["Other&Delay"] = oldest_input_event - intended_vsync
                    profiledata_info_dict["intended_vsync"] = intended_vsync
                    profiledata_info_dict["frame_completed"] = frame_completed
                    profiledata_info_dict["Input Event"] = animation_start - handle_input_start
                    profiledata_info_dict["Animation"] = perform_traversals_start - animation_start
                    profiledata_info_dict["MeasureAndLayout"] = draw_start - perform_traversals_start
                    profiledata_info_dict["Draw"] = sync_start - perform_traversals_start

                    profiledata_info_dict["Prepare"] = issue_draw_commands_start - sync_start
                    profiledata_info_dict["Commands"] = swap_buffers - issue_draw_commands_start
                    profiledata_info_dict["Swap buffer"] = frame_completed - swap_buffers
                    # NOTE(review): `total` telescopes to
                    # (frame_completed - handle_input_start + draw_start
                    #  - perform_traversals_start) and is never used.
                    total = animation_start - handle_input_start + perform_traversals_start - animation_start + draw_start - perform_traversals_start + sync_start - perform_traversals_start + issue_draw_commands_start - sync_start + swap_buffers - issue_draw_commands_start + frame_completed - swap_buffers
                    # Bucket the frame's total duration: <32 ms normal,
                    # 32-48 ms jank, 48-64 ms bigjank, >64 ms failed.
                    profiledata_info_dict["normal total"] = (frame_completed - intended_vsync) if (frame_completed - intended_vsync) < 32000000 else 0
                    profiledata_info_dict["jank total"] = (frame_completed - intended_vsync) if 48000000 > (frame_completed - intended_vsync) > 32000000 else 0
                    profiledata_info_dict["bigjank total"] = (frame_completed - intended_vsync) if 64000000 > (frame_completed - intended_vsync) > 48000000 else 0
                    profiledata_info_dict["failed total"] = (frame_completed - intended_vsync) if (frame_completed - intended_vsync) > 64000000 else 0
                    profiledata_info_dict["frame time consumption"] = (frame_completed - intended_vsync)

                    # NOTE(review): debugging hook for negative durations;
                    # prints an empty line and continues.
                    if frame_completed - intended_vsync < 0:
                        print()
                        pass

                    display_frame_time_list.append(frame_completed - intended_vsync)

                    profiledata_info_df = pd.DataFrame([profiledata_info_dict])
                    profiledata_info_dfs.append(profiledata_info_df)
                if profiledata_info_dfs:
                    profiledata_info_df = pd.concat(profiledata_info_dfs, ignore_index=True)
                else:
                    profiledata_info_df = pd.DataFrame()

                # NOTE(review): one package with several sections keeps only
                # the last section's DataFrame (dict key overwritten).
                profiledata_info_df_dict[fps_name]=profiledata_info_df
                pass






        # ------------------------- end PROFILEDATA processing -------------------------


        # ------------------------- process the df (disk usage) data -------------------------

        df_need_info_dfs=[]
        for i, paragraph in enumerate(df_need_extracted_paragraphs):
            # One record per dump: mount point -> used space.
            df_need_info_dict = {
                "Time": start_times[i]
            }
            for line in paragraph:
                # Whitespace-split df output row.
                data_parts = line.split()
                # Mount point is the last field.
                key_data = data_parts[-1]

                # Used space is the third field.
                value_data = data_parts[2]

                if key_data == "/":
                    key_data="system"

                # Skip the header row right under "Start df".
                if value_data=="Used":
                    continue

                df_need_info_dict[key_data] = value_data
            df_need_info_df = pd.DataFrame([df_need_info_dict])
            df_need_info_dfs.append(df_need_info_df)



        # Merge all per-dump frames into a single DataFrame.
        if len(df_need_info_dfs) > 0:
            df_need_info_df = pd.concat(df_need_info_dfs, ignore_index=True)
        else:
            df_need_info_df=pd.DataFrame()
            # print("No DataFrames to concatenate")


        meminfo_dfs = []
        fps_info_dfs = []
        # Parse the collected /proc/meminfo sections.
        for i, paragraph in enumerate(meminfo_extracted_paragraphs):
            # One record per dump: field name -> value.
            meminfo_dict = {
                "Time": start_times[i]
            }
            for line in paragraph:
                # Strip the ":" separator.
                line = line.replace(":", "")
                # Split on whitespace.
                cleaned_list = line.split()
                # Drop empty elements.
                cleaned_list = list(filter(None, cleaned_list))
                # First element is the key, second is the value.
                meminfo_dict[cleaned_list[0]] = cleaned_list[1]

            meminfo_df = pd.DataFrame([meminfo_dict])
            meminfo_dfs.append(meminfo_df)
        # Merge all per-dump frames into a single DataFrame.
        if meminfo_dfs:
            meminfo_df = pd.concat(meminfo_dfs, ignore_index=True)
        else:
            meminfo_df =pd.DataFrame()

        # NOTE(review): start_times[0] / end_times[-1] raise IndexError when
        # the log contains no dump markers.
        meminfo_Summary_df = pd.DataFrame(
            columns=["Meminfo-total/disk/fps测试开始时间", "Meminfo-total/disk/fps测试结束时间"])
        meminfo_Summary_df.loc[0] = [start_times[0], end_times[-1]]

        fps_info_dfs = []
        for i, paragraph in enumerate(fps_info_extracted_paragraphs):
            # One record per dump: gfxinfo stat name -> value.
            fps_info_dict = {
                "Time": start_times[i]
            }
            for line in paragraph:
                # Split "name: value" on the colon.
                cleaned_list = line.split(":")
                # Drop empty elements.
                cleaned_list = list(filter(None, cleaned_list))
                # Drop any parenthesised suffix from the value, e.g. "(10%)".
                cleaned_list[1] = cleaned_list[1].split("(")[0]

                # First element is the key, second is the value.

                fps_info_dict[cleaned_list[0].rstrip().lstrip()] = cleaned_list[1].rstrip().lstrip()

            fps_info_df = pd.DataFrame([fps_info_dict])
            fps_info_dfs.append(fps_info_df)
        # Merge all per-dump frames into a single DataFrame.

        if fps_info_dfs:
            fps_info_df = pd.concat(fps_info_dfs, ignore_index=True)
            # Derived column: FPS = rendered / (rendered + janky) * 60.
            fps_info_df['FPS'] = fps_info_df['Total frames rendered'].astype("int64") / (
                    fps_info_df['Total frames rendered'].astype("int64") + fps_info_df['Janky frames'].astype(
                "int64")) * 60
        else:
            fps_info_df = pd.DataFrame()  # Create an empty DataFrame



        # NOTE(review): redundant -- the with-statement closes the file.
        f.close()

        # NOTE(review): dead check -- display_frame_time_list is always a list.
        if display_frame_time_list is None:
            display_frame_time_list=[]


    return dump_meminfo_df,meminfo_df, meminfo_Summary_df, fps_info_df,df_need_info_df,profiledata_info_df_dict,display_frame_time_list


def convert_to_bytes(value):
    """Convert a size string such as '1,234', '56K', '7M' or '8G' to a float.

    Thousands separators (commas) are stripped first.  'K' is treated as the
    base unit (returned as-is), 'M' is scaled by 1000 and 'G' by 1000000,
    so every return value is expressed in the same K-based unit.
    """
    cleaned = value.replace(',', '')
    for suffix, factor in (('K', 1), ('M', 1000), ('G', 1000000)):
        if suffix in cleaned:
            return float(cleaned.replace(suffix, '')) * factor
    # No unit suffix: the string is already a plain number.
    return float(cleaned)

def add_max_average(sheet_name):
    """Append a 'max_value' row and an 'average_value' row to *sheet_name*.

    The sheet lives in result/Android_status.xlsx.  The data is first read
    with pandas to compute column-wise max/mean, then the same workbook is
    re-opened with openpyxl and the two summary rows are written directly
    below the existing data before saving in place.
    """
    df = pd.read_excel(os.path.join(BASE_PATH_RESULT, "Android_status.xlsx"), sheet_name=sheet_name)
    # Numeric block: drop the first column (the Time column) and the first row.
    # NOTE(review): read_excel already consumed the header row, so iloc[1:]
    # additionally excludes the FIRST DATA ROW from the max/mean computation;
    # the original comment said it was meant to skip the field names — confirm
    # whether dropping that data row is intentional.
    data = df.iloc[1:, 1:]
    # Number of data rows (header already consumed by read_excel)
    num_rows = df.shape[0]
    # Column-wise maximum and mean of the numeric block
    max_values = data.max()
    mean_values = data.mean()


    # Path of the workbook that will be updated in place
    file_path =  os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")

    # Re-open with openpyxl so rows can be appended without rewriting the sheet

    wb = openpyxl.load_workbook(file_path)

    # Select the worksheet to modify
    sheet = wb[sheet_name]



    # Row labels go in column A just below the data: num_rows data rows plus
    # the header row occupy rows 1..num_rows+1, so the first free row is
    # num_rows+2.
    sheet.cell(row=num_rows + 2, column=1, value='max_value')
    sheet.cell(row=num_rows + 3, column=1, value='average_value')

    # Fill the column maxima starting at column B
    for idx, value in enumerate(max_values, start=2):
        sheet.cell(row=num_rows+2, column=idx, value=value)

    # Fill the column averages starting at column B
    for idx, value in enumerate(mean_values, start=2):
        sheet.cell(row=num_rows+3, column=idx, value=value)

    # Persist the workbook
    wb.save(file_path)
    pass

def combined():
    """Merge the Android meminfo log and every fps log into one file.

    Concatenates source/sysinfo-android-meminfo.log (if present) followed by
    every ``*.log`` found under source/fps into source/combined.log.  The
    output file is always (re)created, even when no inputs exist.
    """
    log_file = os.path.join(BASE_PATH_SOURCE, "sysinfo-android-meminfo.log")  # main log file
    fps_folder = os.path.join(BASE_PATH_SOURCE, "fps")  # fps log folder
    output_file = os.path.join(BASE_PATH_SOURCE, "combined.log")  # merged output

    # Open the output once in 'w' mode: this truncates any previous
    # combined.log and avoids the original pattern of re-opening the file in
    # append mode for every single input log.
    with open(output_file, 'w') as output:
        if os.path.exists(log_file):
            with open(log_file, 'r') as main_log:
                output.write(main_log.read())

        # os.walk yields files in filesystem-dependent order; sort them so the
        # merged log is deterministic across runs and platforms.
        for root, dirs, files in os.walk(fps_folder):
            for file in sorted(files):
                if file.endswith('.log'):
                    log_path = os.path.join(root, file)
                    with open(log_path, 'r') as log:
                        output.write(log.read())


def android():
    """Build the Android performance report (result/Android_status.xlsx).

    Merges the source logs via combined(), parses meminfo / top data, writes
    every non-empty DataFrame to its own sheet, then adds the charts and FPS
    summaries, and finally removes the temporary combined.log.
    """
    combined()


    meminfo_source_file = os.path.join(BASE_PATH_SOURCE, "combined.log")
    # fps_info_df belongs to the original FPS feature and is currently unused.
    (dump_meminfo_df, meminfo_df, meminfo_Summary_df, fps_info_df,
     df_need_info_df, profiledata_info_df_dict,
     display_frame_time_list) = processing_meminfo(meminfo_source_file)

    top_source_file = os.path.join(BASE_PATH_SOURCE, "sysinfo-android.log")
    if os.path.exists(top_source_file):
        top_cpu_df, top_mem_df, top_Summary_df = processing_top(top_source_file)
    else:
        top_cpu_df = pd.DataFrame()
        top_mem_df = pd.DataFrame()
        top_Summary_df = pd.DataFrame()

    report_path = os.path.join(BASE_PATH_RESULT, "Android_status.xlsx")
    # Reset indexes so later horizontal merges align correctly.
    if not meminfo_Summary_df.empty:
        meminfo_Summary_df.reset_index(drop=True, inplace=True)
    if not df_need_info_df.empty:
        df_need_info_df.reset_index(drop=True, inplace=True)

    with pd.ExcelWriter(report_path) as writer:
        # Fixed sheets first, skipping any DataFrame that ended up empty.
        for frame, sheet in (
            (meminfo_Summary_df, "Summary"),
            (top_cpu_df, "CPU"),
            (dump_meminfo_df, "Meminfo"),
            (meminfo_df, "Meminfo-total"),
            (df_need_info_df, "Disk"),
        ):
            if not frame.empty:
                frame.to_excel(writer, sheet_name=sheet, index=False)
        # One sheet per profiled process; Excel limits sheet names, so fall
        # back to the last dotted segment when the name is too long.
        for sheet_key, frame in profiledata_info_df_dict.items():
            inner = sheet_key.split("[")[1].split("]")[0]
            short = inner if len(inner.encode('utf-8')) < 31 else inner[inner.rfind('.') + 1:]
            frame.to_excel(writer, sheet_name=short, index=False)

    if not df_need_info_df.empty:
        process_excel_data2("Disk")

    # Draw an Excel line chart for every sheet that has data.
    if not top_cpu_df.empty:
        draw_line_chart2("CPU", 2, "CPU使用情况（%）")
    if not dump_meminfo_df.empty:
        draw_line_chart2("Meminfo", 2, "Meminfo使用情况（K）")
    if not meminfo_df.empty:
        draw_line_chart2("Meminfo-total", 2, "内存总览（K）")
    if not df_need_info_df.empty:
        draw_line_chart2("Disk", 2, "Disk总览（K）")

    # Per-process FPS sheets: frame-time stats, overlay chart, filter, summary.
    for fps_key, frame in profiledata_info_df_dict.items():
        inner = fps_key.split("[")[1].split("]")[0]
        sheet_name = inner if len(inner.encode('utf-8')) < 31 else inner[inner.rfind('.') + 1:]
        (x_percentage, failed_list_count, bigjank_list_count,
         jank_list_count, total_count, normal_list_count) = add_display_frame_time_list(
            display_frame_time_list, sheet_name)
        draw_overlay_bar_chart(sheet_name)
        AutoFilter(sheet_name)
        add_FPS_info(sheet_name, inner, x_percentage, failed_list_count,
                     bigjank_list_count, jank_list_count, total_count, normal_list_count)

    print(f"已保存为 {report_path}")
    # Clean up the temporary merged log.
    output_file = os.path.join(BASE_PATH_SOURCE, "combined.log")
    if os.path.exists(output_file):
        os.remove(output_file)


def get_cpu_states(states_line):
    """Extract total CPU usage (user + kernel) from a QNX 'CPU states' line.

    Example input: ``CPU states: 38.5% user, 1.6% kernel[K``

    Returns the combined percentage rounded to one decimal and formatted like
    ``"40.1%"``, or ``"0%"`` when the line does not match the pattern.
    """
    # Generalized from the original (\d+\.\d+): whole-number readings such as
    # "38% user" now match as well; decimal values match exactly as before.
    pattern = r"(\d+(?:\.\d+)?)%\s*user,\s*(\d+(?:\.\d+)?)%\s*kernel"
    match = re.search(pattern, states_line)
    if not match:
        return "0%"
    user_percentage = float(match.group(1))
    kernel_percentage = float(match.group(2))
    # Round to one decimal place to keep the report tidy.
    total_percentage = round(user_percentage + kernel_percentage, 1)
    return f"{total_percentage}%"


def processing_qnx_sys(qnx_sys_source_file):
    """Parse a QNX sys-status log into five DataFrames.

    The log is a sequence of dump intervals.  Each interval is delimited by
    keyword markers and contains a ``showmem`` section (memory totals), a
    ``hogs`` section preceded by a ``CPU states`` line (per-process CPU and
    memory), and a ``df`` section (disk usage).  A single pass over the file
    collects each section into per-interval paragraphs; the paragraphs are
    then converted to DataFrames and merged across intervals via
    merge_hog_df_with_cpu (defined elsewhere in this file).

    Returns a 5-tuple:
        hog_cpu_df      -- per-process CPU usage over time
        hog_showmem_df  -- memory usage over time plus a Total/MaxUsed summary
        hog_MEMORY_df   -- per-process memory over time with a system total
        hog_df_df       -- disk usage over time plus a Total/Used summary
        top_Summary_df  -- one row with the first start / last end timestamp
    """
    start_time_keyword = "Start dump"
    end_time_keyword = "End dump"

    start_showmem_keyword = "Start showmem"
    end_showmem_keyword = "End showmem"

    start_hogs_keyword = "Start hogs"
    end_hogs_keyword = "End hogs"
    start_cpu_states_keyword = "CPU states"
    start_pid_keyword = "PID"

    start_df_keyword = "Start df"
    end_df_keyword = "End df"

    with open(qnx_sys_source_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        # Buffers for completed paragraphs (one entry per dump interval)
        hogs_pid_extracted_paragraphs = []
        showmem_extracted_paragraphs = []

        # Lines of the paragraph currently being collected
        hogs_pid_current_paragraph = []
        showmem_current_paragraph = []
        top_cpu_states_paragraph = []
        start_times = []
        end_times = []
        df_current_paragraph = []
        df_extracted_paragraphs = []

        # State flags for the line-by-line section state machine
        hogs_recording = False
        pid_recording = False
        showmem_recording = False
        df_recording = False
        is_df_first_line = True
        top_df_header_list = ["PID", "NAME", "MSEC", "PIDS", "SYS", "MEMORY", "/"]
        showmem_header_list = ["Mem", "Total(KB)", "Used(KB)", "Free(KB)"]
        df_header_list = ["A", "B", "Size", "used", "E", "Name"]
        # NOTE(review): "fs/log/" lacks the leading slash the other mount
        # points have — verify whether that is intentional.
        df_name_list = ["/var/", "/fs/data/", "/fs/share/", "fs/log/", "/fs/share2/", "/fs/sota/", "/persist/","/firmware/"]
        showmem_continue = True
        for line in lines:
            # Start-of-dump marker: record the interval's start timestamp
            if start_time_keyword in line:
                start_times.append(get_time(line))
                continue  # skip the remaining checks for this line

            if end_time_keyword in line:
                end_times.append(get_time(line))

            # --------------------------------------------- showmem section ---------------------------------------------
            # Check whether a showmem section begins

            if start_showmem_keyword in line:
                showmem_recording = True
                continue  # skip the remaining checks for this line

            # While showmem_continue is True we are still inside the two
            # header lines of the section and must not collect data yet.
            if showmem_recording and showmem_continue:
                if "-----" in line:
                    continue  # skip the separator line
                if "Mem" in line:
                    continue  # skip the column-header line
                showmem_continue = False

            # End of the showmem section: flush the collected paragraph
            if showmem_recording and end_showmem_keyword in line:
                showmem_extracted_paragraphs.append(showmem_current_paragraph)
                showmem_recording = False
                showmem_continue = True
                showmem_current_paragraph = []  # reset the current paragraph

                continue  # paragraph saved; skip the remaining checks

            # Inside the showmem body: collect the data line
            if showmem_recording and showmem_continue == False:
                showmem_current_paragraph.append(line)

                continue  # line consumed; skip the remaining checks

            # --------------------------------------------- hogs / top section ---------------------------------------------
            # Collect the "CPU states" summary line for later totals
            if start_cpu_states_keyword in line:
                top_cpu_states_paragraph.append(line)
                continue  # skip the remaining checks for this line

            # Check whether a hogs section begins
            if start_hogs_keyword in line:
                hogs_recording = True

                continue  # skip the remaining checks for this line

            # The "PID" header marks the start of per-process rows
            if start_pid_keyword in line:
                pid_recording = True
                continue  # skip the remaining checks for this line

            # End of the hogs section: flush the collected paragraph
            if hogs_recording and end_hogs_keyword in line:
                hogs_pid_extracted_paragraphs.append(hogs_pid_current_paragraph)
                hogs_recording = False
                hogs_pid_current_paragraph = []  # reset the current paragraph
                continue  # paragraph saved; skip the remaining checks

            # Inside the hogs body: only lines containing '%' are process
            # rows; the first non-matching line ends the PID table.
            if hogs_recording and pid_recording:
                if "%" in line:
                    hogs_pid_current_paragraph.append(line)
                else:
                    pid_recording = False
            # --------------------------------------------- df section ---------------------------------------------
            if start_df_keyword in line:
                df_recording = True
                continue  # skip the remaining checks for this line

            # End of the df section: flush the collected paragraph
            if df_recording and end_df_keyword in line:
                df_extracted_paragraphs.append(df_current_paragraph)
                df_recording = False
                is_df_first_line = True
                df_current_paragraph = []  # reset the current paragraph
                continue  # paragraph saved; skip the remaining checks

            # Inside the df body: always keep the first line, then keep only
            # rows whose mount point is in df_name_list.
            if df_recording:
                if is_df_first_line:
                    df_current_paragraph.append(line)
                    # first line recorded; disable the flag
                    is_df_first_line = False
                    continue  # line consumed; skip the remaining checks
                for df_name in df_name_list:
                    if df_name in line:
                        df_current_paragraph.append(line)
                        break



        top_dfs = []
        total_list = []
        # Build one DataFrame per hogs paragraph
        for i, paragraph in enumerate(hogs_pid_extracted_paragraphs):
            paragraph_new = []
            for line in paragraph:
                # Split the hogs row into columns (helper defined elsewhere)
                cleaned_list = hogs_line_match(line)
                paragraph_new.append(cleaned_list)

            # NOTE(review): this inner loop re-appends EVERY "CPU states"
            # total on each outer iteration, so total_list grows to roughly
            # len(paragraphs) * len(states).  total_list[i] below still picks
            # the correct value (the first pass appended them all in order),
            # but the repeated appends look unintended — confirm.
            for states in top_cpu_states_paragraph:
                total = get_cpu_states(states)
                # Strip the percent sign; the bare number is what the Excel
                # line chart consumes later.
                total_list.append(total.rstrip('%'))
                # Create the DataFrame and insert the timestamp column
            top_df = pd.DataFrame(paragraph_new, columns=top_df_header_list)
            top_df.insert(0, 'Time', start_times[i])
            top_df.insert(1, 'total', total_list[i])

            top_dfs.append(top_df)
        hog_cpu_df = merge_hog_df_with_cpu(top_dfs, columns_name="NAME", type="PIDS")
        # Process the showmem paragraphs
        showmem_dfs = []
        for i, paragraph in enumerate(showmem_extracted_paragraphs):
            paragraph_new = []
            for line in paragraph:
                # Split the showmem row into columns (helper defined elsewhere)
                cleaned_list = showmem_line_match(line)
                paragraph_new.append(cleaned_list)
            showmem_df = pd.DataFrame(paragraph_new, columns=showmem_header_list)
            showmem_df.insert(0, 'Time', start_times[i])
            showmem_dfs.append(showmem_df)
        hog_showmem_df = merge_hog_df_with_cpu(showmem_dfs, columns_name="Mem", type="Used(KB)")
        hog_showmem_total_df = merge_hog_df_with_cpu(showmem_dfs, columns_name="Mem", type="Total(KB)")
        showmem_dict_list = []
        # Column names of the merged totals frame (column 0 is Time)
        header = hog_showmem_total_df.columns
        # Build a Total/MaxUsed summary row per memory pool
        for i in range(len(header)):
            if i == 0:
                continue
            showmem_dict = {
                "Name": header[i],
                "Total": hog_showmem_total_df[header[i]].max(),
                "MaxUsed": hog_showmem_df[header[i]].max()
            }
            showmem_dict_list.append(showmem_dict)
        showmem_dict_df = pd.DataFrame(showmem_dict_list)

        # Append the summary columns after a blank spacer column
        hog_showmem_df[""] = ""
        last_col = len(hog_showmem_df.columns)

        for col in showmem_dict_df.columns:
            hog_showmem_df.insert(last_col, col, showmem_dict_df[col])
            last_col += 1

        # Per-process memory from the hogs paragraphs

        hog_MEMORY_df = merge_hog_df_with_cpu(top_dfs, columns_name="NAME", type="MEMORY", is_total=False)
        hog_MEMORY_df["total"] = hog_showmem_df["sysram"]
        # Move the "total" column to the second position (right after Time)
        cols = hog_MEMORY_df.columns.tolist()
        cols.insert(1, cols.pop(cols.index("total")))
        hog_MEMORY_df = hog_MEMORY_df[cols]

        df_dfs = []
        # Process the df (disk) paragraphs
        for i, paragraph in enumerate(df_extracted_paragraphs):
            paragraph_new = []
            for line in paragraph:
                # Split the df row into columns (same helper as showmem)
                cleaned_list = showmem_line_match(line)
                paragraph_new.append(cleaned_list)
            df_df = pd.DataFrame(paragraph_new, columns=df_header_list)
            df_df.insert(0, 'Time', start_times[i])
            df_dfs.append(df_df)
        hog_df_df = merge_hog_df_with_cpu(df_dfs, columns_name="Name", type="Size")
        hog_df_df_used = merge_hog_df_with_cpu(df_dfs, columns_name="Name", type="used")
        df_dict_list = []

        # Rename the root mount "/" to "system" in both merged frames
        hog_df_df.rename(columns={'/': 'system'}, inplace=True)
        hog_df_df_used.rename(columns={'/': 'system'}, inplace=True)
        # Column names of the merged sizes frame (column 0 is Time)
        header = hog_df_df.columns
        # Build a Total/Used summary row per mount point
        for i in range(len(header)):
            if i == 0:
                continue
            showmem_dict = {
                "Name": header[i],
                "Total": hog_df_df[header[i]].max(),
                "Used": hog_df_df_used[header[i]].max()
            }
            df_dict_list.append(showmem_dict)
        df_dict_df = pd.DataFrame(df_dict_list)

        # Append the summary columns after a blank spacer column
        hog_df_df[""] = ""
        last_col = len(hog_df_df.columns)

        for col in df_dict_df.columns:
            hog_df_df.insert(last_col, col, df_dict_df[col])
            last_col += 1

        top_Summary_df = pd.DataFrame(columns=["sys测试开始时间", "sys测试结束时间"])
        top_Summary_df.loc[0] = [start_times[0], end_times[-1]]
        f.close()  # redundant: the with-statement already closes the file

    return hog_cpu_df, hog_showmem_df, hog_MEMORY_df, hog_df_df, top_Summary_df

    # NOTE(review): unreachable — follows the return statement
    pass


def processing_gpu_sys(qnx_gpu_source_file):
    """Parse a QNX GPU status log into a DataFrame.

    Each line is whitespace separated; the first three fields are joined to
    form the ``Time`` value and the last field is the GPU usage with the
    percent sign stripped (``Percentage``).

    Returns a DataFrame with columns ``Time`` and ``Percentage`` (both str).
    """
    dict_list = []
    with open(qnx_gpu_source_file, 'r', encoding='utf-8') as f:
        for line in f:
            line_list = line.split()  # split() already drops empty fields
            # Skip blank or truncated lines: the original indexed
            # line_list[0..2] unconditionally and raised IndexError on them.
            if len(line_list) < 3:
                continue
            time = line_list[0] + " " + line_list[1] + " " + line_list[2]
            Percentage = line_list[-1].replace("%", "")
            dict_list.append({
                "Time": time,
                "Percentage": Percentage
            })
    # The with-statement closes the file; the original's extra f.close() was redundant.
    return pd.DataFrame(dict_list)


def QNX():
    """Build the QNX performance report (result/QNX_status.xlsx).

    Parses the QNX sys and GPU status files when present, writes every
    non-empty DataFrame to its own sheet, then adds the summary rows and
    charts for the populated sheets.
    """
    qnx_sys_source_file = os.path.join(BASE_PATH_SOURCE, "qnx_sys_status.txt")
    if os.path.exists(qnx_sys_source_file):
        (hog_cpu_df, hog_showmem_df, hog_MEMORY_df,
         hog_df_df, top_Summary_df) = processing_qnx_sys(qnx_sys_source_file)
    else:
        # No sys log: fall back to empty frames so the sheet loop skips them.
        hog_cpu_df = pd.DataFrame()
        hog_showmem_df = pd.DataFrame()
        hog_MEMORY_df = pd.DataFrame()
        hog_df_df = pd.DataFrame()
        top_Summary_df = pd.DataFrame()

    qnx_gpu_source_file = os.path.join(BASE_PATH_SOURCE, "qnx_gpu_status.txt")
    gpu_df = processing_gpu_sys(qnx_gpu_source_file) if os.path.exists(qnx_gpu_source_file) else pd.DataFrame()

    report_path = os.path.join(BASE_PATH_RESULT, "QNX_status.xlsx")
    with pd.ExcelWriter(report_path) as writer:
        # Write each populated DataFrame to its sheet, preserving sheet order.
        for frame, sheet in (
            (top_Summary_df, "Summary"),
            (hog_cpu_df, "CPU"),
            (hog_showmem_df, "Meminfo"),
            (hog_MEMORY_df, "App"),
            (hog_df_df, "Disk"),
            (gpu_df, "GPU"),
        ):
            if not frame.empty:
                frame.to_excel(writer, sheet_name=sheet, index=False)

    # Post-processing (max/average rows) for the sheets that support it.
    if not hog_MEMORY_df.empty:
        process_excel_data("App")
    if not hog_df_df.empty:
        process_excel_data("Disk")

    # Draw a chart on each populated sheet.
    if not hog_cpu_df.empty:
        draw_c("CPU", 2, "CPU使用情况")
    if not hog_showmem_df.empty:
        draw_c("Meminfo", 2, "QNX总内存")
    if not hog_MEMORY_df.empty:
        draw_c("App", 3, "应用内存（K）")
    if not hog_df_df.empty:
        draw_c("Disk", 2, "磁盘使用情况（K）")
    if not gpu_df.empty:
        draw_c("GPU", 2, "GPU使用情况(%)")

    print(f"已保存为 {report_path}")


if __name__ == '__main__':
    # Always build the Android report from the source logs.
    android()

    # Build the QNX report only when at least one QNX source file exists.
    # BUG FIX: the original called os.path.exists(a or b); since `a or b`
    # on two non-empty strings evaluates to the first string, only the sys
    # file was ever checked and qnx_gpu_status.txt alone never triggered QNX().
    qnx_sys_source_file = os.path.join(BASE_PATH_SOURCE, "qnx_sys_status.txt")
    qnx_gpu_source_file = os.path.join(BASE_PATH_SOURCE, "qnx_gpu_status.txt")
    if os.path.exists(qnx_sys_source_file) or os.path.exists(qnx_gpu_source_file):
        QNX()