from pathlib import Path
import subprocess
import sqlite3
from collections import defaultdict
import re
from functools import partial
import time
import warnings

from tqdm import tqdm
import pandas as pd
import numpy as np

from config import TRACE_FILE_FDR, experiment_name


# candidate process names of the benchmarked app as they appear in the trace
process_names = ["plication250227", "ts.ace.napitest", "com.rnoh.tester"]


# reference benchmark totals measured on Android, keyed by scenario name
# (the first '@'-separated token of the trace file name); units match
# full_time_trace (milliseconds)
android_benchmark = {"image": 625, "text": 978.9, "view": 340}
# human-readable (Chinese) descriptions of the columns written to result.csv;
# the values are data, not comments, so they are left untranslated
column_descriptions = {
    "trace_name": "htrace文件名",
    "full_time_trace": "从trace中解析获取的benchmark运行总时间",
    "full_time_alert": "应用中统计的benchmark运行总时间，和运行结束后弹框显示的一致",
    "js_stage_time": "RNOH_JS线程上运行应用代码，yoga layout，diff等RN逻辑的时间",
    "ui_stage_time": "ui线程上运行ArkUI组件创建，属性设置等任务的时间",
    "large_busy_js": "JS阶段大核Running状态占JS阶段总时间的比例",
    "large_busy_ui": "UI阶段大核Running状态占UI阶段总时间的比例",
    "large_busy": "benchmark运行全过程中大核Running状态的时间比例",
    "js_running": "js阶段js线程Running状态总时间",
    "js_large_core": "js阶段js线程在大核Running的时间占总Running时间的比例",
    "js_mid_core": "js阶段js线程在中核Running的时间占总Running时间的比例",
    "ui_running": "ui阶段ui线程Running状态的总时间",
    "ui_large_core": "ui阶段ui线程在大核Running的总时间占总Running时间的比例",
    "ui_mid_core": "ui阶段ui线程在中核Running总时间占总Running时间的比例",
    "prealloc_run_pct": "js阶段ui线程Running时间/js阶段总时间",
    "prealloc_large_core": "预创建阶段ui线程在大核Running时间占总Running时间的比例",
    "prealloc_mid_core": "预创建阶段ui线程在中核Running时间占总Running时间的比例",
    "start_time": "benchmark开始的时间戳",
    "finish_time": "benchmark结束的时间戳",
    "js_end_time": "js阶段结束的时间戳",
    "ui_start_time": "ui阶段开始的时间戳",
    "alert_time": "弹框显示应用统计bechmark时间的时间戳",
}

# core type indexed by cpu id 0-11: "s"mall (0-3), "m"iddle (4-9), "l"arge (10-11)
# NOTE(review): layout appears specific to one SoC (12 cores) — confirm per device
cpu_types = [
    "s",
    "s",
    "s",
    "s",
    "m",
    "m",
    "m",
    "m",
    "m",
    "m",
    "l",
    "l",
]

# openpyxl emits non-actionable style warnings when pandas writes xlsx files
warnings.filterwarnings("ignore", category=UserWarning, module="openpyxl")


def analyze(experiment_name: str):
    """Parse every .htrace under TRACE_FILE_FDR/<experiment_name> and write
    per-trace metrics to result.csv plus aggregated sheets to an xlsx file."""
    trace_fdr = TRACE_FILE_FDR / experiment_name

    # bash util
    def run_cmd(cmd: str):
        """Run *cmd* as a subprocess (no shell) and return the CompletedProcess.

        shlex.split handles quoting and collapses arbitrary whitespace runs,
        unlike the naive str.split(" ") it replaces.
        """
        import shlex

        return subprocess.run(shlex.split(cmd), text=True)

    # sql util
    def fetchone(sql: str, cur: sqlite3.Cursor, throw_on_not_found: bool = True):
        """Execute *sql* and return its single result row.

        Returns the row when exactly one matches, otherwise None — unless
        throw_on_not_found is set, in which case any row count other than
        one raises an AssertionError.
        """
        rows = cur.execute(sql).fetchall()
        if throw_on_not_found:
            assert len(rows) == 1, f"{sql}\n expect 1, got {len(rows)}"
        return rows[0] if len(rows) == 1 else None

    def fetchall(sql: str, cur: sqlite3.Cursor):
        """Execute *sql* and return every matching row as a list."""
        return cur.execute(sql).fetchall()

    def thread_cpu_stat(thread_id: int, sts: int, ets: int):
        """Aggregate scheduler-state durations (ms) for one thread over [sts, ets].

        Returns a dict with running time per core type ("s"/"m"/"l"), total
        time per thread state, and both normalized to fractions of their
        respective totals (empty dicts when the total is zero).
        """
        thread_states = fetchall(
            f"SELECT * FROM thread_state WHERE itid = {thread_id} AND ts + dur >= {sts} AND ts <= {ets} ORDER BY ts ASC"
        )

        cpu_times = defaultdict(lambda: 0)
        cpu_states = defaultdict(lambda: 0)
        for state in thread_states:
            # clamp each state interval to the requested window; ns -> ms
            dur = (min(ets, state["ts"] + state["dur"]) - max(sts, state["ts"])) / 10**6
            cpu_states[state["state"]] += dur
            # is running on cpu: compare against None so that core 0 is not
            # dropped (0 is falsy); assumes cpu is NULL for non-running rows —
            # TODO(review) confirm against trace_streamer's thread_state schema
            if state["cpu"] is not None:
                cpu_times[cpu_types[int(state["cpu"])]] += dur
        tot_running = sum(cpu_times.values())
        # guard zero totals so an idle window doesn't raise ZeroDivisionError
        cpu_times_pct = {k: v / tot_running for k, v in cpu_times.items()} if tot_running else {}
        tot_state = sum(cpu_states.values())
        cpu_states_pct = {k: v / tot_state for k, v in cpu_states.items()} if tot_state else {}

        return {
            "cpu_times": cpu_times,
            "cpu_times_pct": cpu_times_pct,
            "cpu_states": cpu_states,
            "cpu_states_pct": cpu_states_pct,
        }

    def cpu_usage_stat(sts: int, ets: int):
        """Aggregate running time (ms) per cpu across ALL threads over [sts, ets].

        Returns absolute per-cpu times and the same values divided by the
        window length (per-core utilization).
        """
        thread_states = fetchall(f"SELECT * FROM thread_state WHERE ts + dur >= {sts} AND ts <= {ets} ORDER BY ts ASC")

        cpu_times = defaultdict(lambda: 0)
        for state in thread_states:
            # compare against None so core 0 is counted (0 is falsy); assumes
            # cpu is NULL for non-running rows — TODO(review) confirm
            if state["cpu"] is not None:
                dur = (min(ets, state["ts"] + state["dur"]) - max(sts, state["ts"])) / 10**6
                cpu_times[int(state["cpu"])] += dur
        dur = (ets - sts) / 10**6
        # guard a degenerate zero-length window
        cpu_times_pct = {k: v / dur for k, v in cpu_times.items()} if dur else {}

        return {
            "cpu_times": cpu_times,
            "cpu_times_pct": cpu_times_pct,
        }

    def get_covering_slice(slice_id: int, target_name_part: str) -> dict:
        """Walk up the callstack parent chain to find a covering (ancestor) slice.

        Args:
            slice_id (int): the slice you want to find the top slice from
            target_name_part (str): substring identifying the wanted ancestor

        Returns:
            dict: dict of ALL columns for the matching ancestor slice, or None
            when the lookup fails, the chain ends (NULL parent) or no match is
            found within 100 levels.
        """
        for _ in range(100):  # hard cap guards against parent_id cycles
            try:
                curr_slice = fetchone(f"SELECT * from callstack WHERE id = {slice_id}")
            except AssertionError:
                return None
            if target_name_part in curr_slice["name"]:
                return curr_slice

            slice_id = curr_slice["parent_id"]
            # a NULL parent means we reached the root without a match; stop
            # instead of issuing an invalid "WHERE id = None" query
            if slice_id is None:
                return None
        return None

    def get_dominant_frame_node_name(cur: sqlite3.Cursor, ui_thread_id: int):
        """Return the most frequent FrameNode RenderTask slice-name pattern on
        the UI thread, with the numeric node id replaced by the LIKE wildcard
        '%' so the result can be used directly in later LIKE queries."""
        frame_node_slices = cur.execute(
            f"select name from callstack where name like 'H:FrameNode[%][id:%]::RenderTask%' AND callid = {ui_thread_id}"
        )
        frame_node_counter = defaultdict(lambda: 0)
        # raw string avoids escaping surprises; compile once outside the loop
        id_re = re.compile(r"\[id:[0-9]*\]")
        for frame_node_slice in frame_node_slices:
            name = id_re.sub("[id:%]", frame_node_slice["name"])
            frame_node_counter[name] += 1
        # max() on an empty dict would raise a cryptic ValueError
        assert frame_node_counter, f"no FrameNode RenderTask slices for thread {ui_thread_id}"
        return max(frame_node_counter, key=frame_node_counter.get)

    def count_and_tot_time(
        cur: sqlite3.Cursor, name_pattern: str, thread_id: int = None, sts: int = None, ets: int = None
    ):
        """Count callstack slices matching *name_pattern* and sum their duration (ms).

        LIKE matching is used when the pattern contains '%', exact equality
        otherwise. The optional filters are checked with `is not None` so a
        thread id or timestamp of 0 is honored instead of silently ignored.
        Returns {"<pattern> count": n, "<pattern> tot_time": ms}.
        """
        cmd = f"FROM callstack WHERE name {'like' if '%' in name_pattern else '='} '{name_pattern}'"
        if thread_id is not None:
            cmd += f" AND callid = {thread_id}"
        if sts is not None:
            cmd += f" AND ts >= {sts}"
        if ets is not None:
            cmd += f" AND ts < {ets}"
        count = cur.execute("SELECT count(1) " + cmd).fetchone()["count(1)"]
        if count:
            tot_time = cur.execute("SELECT sum(dur) " + cmd).fetchone()["sum(dur)"] / 10**6
        else:
            # sum(dur) would be NULL with no rows; report 0 instead
            tot_time = 0
        return {f"{name_pattern} count": count, f"{name_pattern} tot_time": tot_time}

    def find_nth_index(string: str, char: str, n: int) -> int:
        """Find the nth occurrence of a character in a string.

        Args:
            string (str): The string to search.
            char (str): The character to find.
            n (int): The occurrence number.

        Returns:
            int: The index of the nth occurrence of the character, or -1 if not found.
        """
        idx = string.find(char)
        for _ in range(n - 1):
            if idx < 0:
                break
            idx = string.find(char, idx + 1)
        return idx

    # resume support: rows already present in result.csv are skipped below
    processed = pd.DataFrame()
    processed_names = []

    try:
        if (trace_fdr / "result.csv").exists():
            processed = pd.read_csv(trace_fdr / "result.csv")
            if "trace_name" in processed.columns:
                processed_names = processed["trace_name"].tolist()
    except Exception as e:
        print(f"Error loading processed data: {e}")

    results = []
    trace_paths = [p for p in trace_fdr.glob("*.htrace") if p.name not in processed_names]
    for trace_path in tqdm(trace_paths):
        # if (
        #     trace_path.name
        #     != "image@cmake@bisheng-tune-notrooted@ALN-AL00_51052_SP31DEVC00E52R4P1log@rooted@28@HUAWEI_Mate_60_Pro@1D20AA87@03-01_12-36-22.htrace"
        # ):
        #     continue

        print(trace_path)

        # Skip files created less than 5 seconds ago — they may still be copying
        file_ctime = Path(trace_path).stat().st_ctime
        if time.time() - file_ctime < 5:
            print(f"Skipping: too recent")
            continue

        try:
            result = {}
            # convert trace file to sqlite for querying
            db_path = trace_path.parent / trace_path.name.replace(".htrace", ".db")
            if not db_path.exists():
                run_cmd(
                    f"trace_streamer_mac {str(
                    trace_path)} -e {db_path}"
                )

            # trace_streamer leaves a .ohos.ts sidecar next to the db; remove it
            ts_path = db_path.parent / (db_path.name + ".ohos.ts")
            if ts_path.exists():
                run_cmd(f"rm {str(ts_path)}")

            con = sqlite3.connect(db_path)
            con.row_factory = sqlite3.Row
            cur = con.cursor()

            # NOTE(review): each iteration re-wraps the previous partial, so
            # stale cursors stay referenced and the connection is never closed
            fetchone = partial(fetchone, cur=cur)
            fetchall = partial(fetchall, cur=cur)

            # find ui process (last matching candidate name wins)
            process_id = None
            for name in process_names:
                try:
                    process_id = fetchone(f"SELECT ipid FROM process WHERE name = '{name}';")["ipid"]
                    process_name = name
                except AssertionError:
                    pass
            if process_id is None:
                raise RuntimeError("ui process not found")

            # UI thread = main thread of the app process
            ui_thread_id = fetchone(
                f"SELECT * FROM thread WHERE name = '{process_name}' AND ipid = {process_id} AND is_main_thread = 1;"
            )["itid"]
            # JS thread = the RNOH_JS thread with the most callstack slices
            js_thread_id = fetchone(
                f"SELECT * FROM thread WHERE name = 'RNOH_JS' AND ipid = {process_id} ORDER BY (SELECT COUNT(1) FROM callstack WHERE callid = tid) DESC LIMIT 1"
            )["itid"]
            trace_start_time = fetchone("SELECT * FROM trace_range")["start_ts"]

            # get start time: the task slice covering the last topTouchEnd dispatch
            touch_slice = fetchone(
                f"SELECT ts, parent_id FROM callstack WHERE (name LIKE 'H:UIManagerBinding::dispatchEventtypetopTouchEnd%' OR name LIKE 'H:UIManagerBinding::dispatchEvent[type][topTouchEnd]%') AND callid = {js_thread_id} ORDER BY ts DESC LIMIT 1"
            )
            js_stage_slice = get_covering_slice(touch_slice["parent_id"], "H:#RNOH::TaskExecutor::runningTask")
            if js_stage_slice is None:
                # older RNOH versions use a different task-slice name
                js_stage_slice = get_covering_slice(touch_slice["parent_id"], "H:#RNOH::TaskRunner::task")
            assert js_stage_slice is not None, f"js_stage_slice not found"
            assert js_stage_slice["dur"] is not None, f"js_stage_slice dur not found"
            start_ts = js_stage_slice["ts"]

            # the in-app alert slice (optional; absent on some runs)
            try:
                alert_slice = fetchone(
                    f"SELECT ts, name, dur FROM callstack WHERE name LIKE 'H:benchmarkAlerting: %' AND callid = {js_thread_id} ORDER BY ts DESC LIMIT 1"
                )
            except AssertionError:
                alert_slice = None

            # get finish time: whichever ends later of (a) the vsync covering the
            # last render of the dominant node, (b) the first vsync after didMount
            dominant_node_pattern = get_dominant_frame_node_name(cur, ui_thread_id)
            last_node_render_slice = fetchone(
                f"""SELECT * FROM callstack WHERE name LIKE '{dominant_node_pattern}' and callid = {ui_thread_id} {f"AND ts < {alert_slice['ts']}" if alert_slice is not None else ""} ORDER BY ts DESC LIMIT 1"""
            )
            last_did_mount = fetchone(
                f"SELECT ts, dur, name FROM callstack WHERE (name LIKE 'H:#RNOH::MountingManager::didMount %' OR name LIKE 'H:RNOH::MountingManagerCAPI::didMount%') AND callid = {ui_thread_id} ORDER BY ts DESC LIMIT 1;"
            )
            vsync_after_last_did_mount = fetchone(
                f"SELECT name, ts, dur FROM callstack WHERE name LIKE 'H:OnVsyncCallback%' AND callid = {ui_thread_id} AND ts > {last_did_mount['ts'] + last_did_mount['dur']} ORDER BY ts ASC LIMIT 1;"
            )
            last_vsync = get_covering_slice(last_node_render_slice["parent_id"], "H:OnVsyncCallback ")
            finish_ts = max(
                last_vsync["ts"] + last_vsync["dur"],
                vsync_after_last_did_mount["ts"] + vsync_after_last_did_mount["dur"],
            )

            # sanity check: rendering should finish within a few seconds of trace start
            should_finish_before = 4 if "@rooted@" in trace_path.name else 2
            if finish_ts - trace_start_time > should_finish_before * 10**9:
                raise RuntimeError(
                    f"Finish render after trace started for {should_finish_before}s , check trace window. Finish time: {finish_ts}"
                )

            # get ui stage start time: first doMount after the js stage began,
            # falling back to the end of the js stage slice
            try:
                first_do_mount = fetchone(
                    f"SELECT * FROM callstack WHERE name LIKE 'H:#RNOH::MountingManager::doMount%' AND callid = {ui_thread_id} and ts > {js_stage_slice['ts']} ORDER BY ts ASC LIMIT 1"
                )
                ui_start_time = first_do_mount["ts"]
            except AssertionError:
                ui_start_time = js_stage_slice["ts"] + js_stage_slice["dur"]

            # all durations below are ns -> ms
            full_time_trace = (finish_ts - start_ts) / 10**6
            assert full_time_trace > 100, f"full_time_trace too short: {full_time_trace}"
            full_time_alert = "" if alert_slice is None else float(alert_slice["name"].split(" ")[1])
            js_stage_time = js_stage_slice["dur"] / 10**6
            ui_stage_time = (finish_ts - ui_start_time) / 10**6

            # calculate ui stage top level trace coverage
            ui_stage_top_level_covered_time = fetchone(
                f"SELECT SUM(dur) FROM callstack WHERE depth = 0 AND callid = {ui_thread_id} AND ts > {ui_start_time} AND ts + dur < {finish_ts}"
            )["SUM(dur)"]

            js_slice_count = fetchone(
                f"SELECT COUNT(1) FROM callstack WHERE callid = {js_thread_id} AND ts >= {start_ts} AND ts < {finish_ts};"
            )["COUNT(1)"]
            ui_slice_count = fetchone(
                f"SELECT COUNT(1) FROM callstack WHERE callid = {ui_thread_id} AND ts >= {start_ts} AND ts < {finish_ts};"
            )["COUNT(1)"]

            # count number of elements rendered during preallocation
            prealloc_count = fetchone(
                f"SELECT COUNT(1) FROM callstack WHERE name LIKE 'H:OnLayoutFinish%' AND callid = {ui_thread_id} AND ts >= {start_ts} AND ts < {ui_start_time};"
            )["COUNT(1)"]

            js_cpu_stats = thread_cpu_stat(js_thread_id, start_ts, start_ts + js_stage_slice["dur"])
            ui_cpu_stats = thread_cpu_stat(ui_thread_id, ui_start_time, finish_ts)
            prealloc_cpu_stats = thread_cpu_stat(ui_thread_id, start_ts, ui_start_time)
            js_all_cpu_usage = cpu_usage_stat(start_ts, start_ts + js_stage_slice["dur"])
            ui_all_cpu_usage = cpu_usage_stat(ui_start_time, finish_ts)
            all_cpu_usage = cpu_usage_stat(start_ts, finish_ts)
            android_time = android_benchmark[trace_path.name.split("@")[0]]

            result = {
                **result,
                "trace_name": trace_path.name,
                "full_time_trace": full_time_trace,
                "android_pct": android_time / full_time_trace,
                "full_time_alert": full_time_alert,
                "js_stage_time": js_stage_time,
                "ui_stage_time": ui_stage_time,
                "ui_js_overlap": (start_ts + js_stage_slice["dur"] - ui_start_time) / 10**6,
                "prealloc_count": prealloc_count,
                "ui_stage_top_level_covered_time": ui_stage_top_level_covered_time / 10**6,
                "ui_stage_top_level_covered_pct": ui_stage_top_level_covered_time / (finish_ts - ui_start_time),
            }

            # cpu frequency
            def get_freq_stats(cpu_id: int, sts: int, ets: int):
                """Return (max_freq, time-weighted avg freq) for *cpu_id* over [sts, ets].

                Falls back to (0, 0) when the cpu has no frequency channel or no
                samples overlap the window (avoids division by zero).
                """
                try:
                    freq_filter_id = fetchone(
                        f"SELECT * FROM cpu_measure_filter WHERE cpu = {cpu_id} AND name = 'cpu_frequency'"
                    )["id"]
                except AssertionError:
                    return 0, 0
                freq_measures = fetchall(
                    f"SELECT * FROM measure WHERE filter_id = {freq_filter_id} AND ts + dur >= {sts} AND ts <= {ets}"
                )
                tot_time = 0
                tot_khzms = 0
                max_freq = -1
                for freq_measure in freq_measures:
                    max_freq = max(max_freq, freq_measure["value"])
                    # clamp the sample interval to the window; ns -> ms
                    start = max(sts, freq_measure["ts"])
                    finish = min(ets, freq_measure["ts"] + freq_measure["dur"])
                    # named `span`, not `time`, to avoid shadowing the time module
                    span = (finish - start) / 10**6
                    tot_time += span
                    tot_khzms += freq_measure["value"] * span
                if tot_time == 0:
                    return 0, 0
                return max_freq, tot_khzms / tot_time

            # cores 10 and 6: one large and one middle core (see cpu_types)
            ten_max_freq, ten_avg_freq = get_freq_stats(10, start_ts, finish_ts)
            six_max_freq, six_avg_freq = get_freq_stats(6, start_ts, finish_ts)

            # hiperf sampling data — only present in some traces
            try:
                ids = fetchone(f"SELECT thread_id, process_id FROM perf_thread WHERE thread_name = '{process_name}'")
                # NOTE(review): this rebinds ui_thread_id/js_thread_id to the
                # perf-side thread ids, and later callstack queries reuse these
                # names — confirm perf_thread.thread_id and callstack.callid are
                # actually interchangeable (tid vs itid)
                ui_thread_id, ui_process_id = ids["thread_id"], ids["process_id"]
                js_thread_id = fetchone("SELECT thread_id FROM perf_thread WHERE thread_name = 'RNOH_JS'")["thread_id"]
                all_thread_ids = tuple(
                    str(r["thread_id"])
                    for r in fetchall(f"SELECT thread_id FROM perf_thread WHERE process_id = {ui_process_id}")
                )
                cpu_cycle_id = fetchone("SELECT id FROM perf_report WHERE report_value = 'hw-cpu-cycles';")["id"]
                instruction_count_id = fetchone("SELECT id FROM perf_report WHERE report_value = 'hw-instructions';")[
                    "id"
                ]

                def get_event_count(thread_id, event_type_id):
                    """Sum perf event counts within the benchmark window for one
                    tid (int) or for a tuple of tid strings (IN clause)."""
                    if isinstance(thread_id, int):
                        thread_cond = f"= {thread_id}"
                    else:
                        thread_cond = f"IN ({','.join(thread_id)})"
                    return fetchone(
                        f"SELECT sum(event_count) FROM perf_sample WHERE thread_id {thread_cond} AND event_type_id = {event_type_id} AND timeStamp_trace >= {start_ts} AND timeStamp_trace <= {finish_ts};"
                    )["sum(event_count)"]

                js_cycle = get_event_count(js_thread_id, cpu_cycle_id)
                ui_cycle = get_event_count(ui_thread_id, cpu_cycle_id)
                tot_cycle = get_event_count(all_thread_ids, cpu_cycle_id)

                js_instruction = get_event_count(js_thread_id, instruction_count_id)
                ui_instruction = get_event_count(ui_thread_id, instruction_count_id)
                tot_instruction = get_event_count(all_thread_ids, instruction_count_id)
                result = {
                    **result,
                    "js_cycle": js_cycle,
                    "ui_cycle": ui_cycle,
                    "tot_cycle": tot_cycle,
                    "js_instruction": js_instruction,
                    "ui_instruction": ui_instruction,
                    "tot_instruction": tot_instruction,
                }
            except AssertionError:
                print("No hiperf data, skipping load analysis")

            count_and_tot_time_ui = partial(
                count_and_tot_time, cur=cur, thread_id=ui_thread_id, sts=ui_start_time, ets=finish_ts
            )
            result = {
                **result,
                # ui thread
                **count_and_tot_time_ui(name_pattern="H:#RNOH::TaskRunner::task%"),
                **count_and_tot_time_ui(name_pattern="H:#RNOH::TaskExecutor::runTask%"),  # task producer
                **count_and_tot_time_ui(name_pattern="H:#RNOH::MountingManager::didMount %"),
                ## under a vsync
                **count_and_tot_time_ui(name_pattern="H:ReceiveVsync %dataCount: 24bytes now:%"),
                **count_and_tot_time_ui(name_pattern="H:FlushDirtyNodeUpdate%"),
                **count_and_tot_time_ui(name_pattern="H:AddDirtyLayoutNode[%][self:%][parent:%][key:%]%"),
                **count_and_tot_time_ui(name_pattern="H:FlushLayoutTask%"),
                **count_and_tot_time_ui(name_pattern="H:Measure[%][self:%][parent:%][key:%]%"),  # under layout
                **count_and_tot_time_ui(name_pattern="H:Layout[%][self:%][parent:%][key:%]%"),  # under layout
                **count_and_tot_time_ui(name_pattern="H:FlushSyncGeometryNodeTasks%"),  # under layout
                **count_and_tot_time_ui(name_pattern="H:FlushRenderTask %"),
                **count_and_tot_time_ui(name_pattern="H:RSModifierManager Draw num:%"),
                **count_and_tot_time_ui(name_pattern="H:FlushMessages%"),
                # view
                **count_and_tot_time_ui(name_pattern="H:CreateTaskMeasure[Custom][self:%"),
                **count_and_tot_time_ui(name_pattern="H:CreateTaskLayout[Custom][self:%"),
                **count_and_tot_time_ui(name_pattern="H:SavePaintRect[Custom][self:%"),
                **count_and_tot_time_ui(name_pattern="H:OnLayoutFinish[Custom][self:%"),
                # **count_and_tot_time(cur, "H:UIManager::createNode"),
                # **count_and_tot_time(cur, "H:UIManager::cloneNode"),
                # **count_and_tot_time(cur, "H:UIManager::appendChild"),
                # **count_and_tot_time(cur, "H:Differentiator::calculateShadowViewMutationsV2"),
                # **count_and_tot_time(cur, "H:NativeNodeApi::getInstance()->createNode"),
                # **count_and_tot_time(cur, "H:CustomNode::insertChild"),
                # **count_and_tot_time(cur, "H:CppComponentInstance::onPropsChanged"),
                **count_and_tot_time(
                    cur,
                    "%paint[offset:%",  # arkui text
                    thread_id=ui_thread_id,
                    sts=ui_start_time,
                    ets=finish_ts,
                ),
                "ten_max_freq": ten_max_freq,
                "ten_avg_freq": ten_avg_freq,
                "six_max_freq": six_max_freq,
                "six_avg_freq": six_avg_freq,
                "large_busy_js": js_all_cpu_usage["cpu_times_pct"].get(10, 0)
                + js_all_cpu_usage["cpu_times_pct"].get(11, 0),
                "large_busy_ui": ui_all_cpu_usage["cpu_times_pct"].get(10, 0)
                + ui_all_cpu_usage["cpu_times_pct"].get(11, 0),
                "large_busy": all_cpu_usage["cpu_times_pct"].get(10, 0) + all_cpu_usage["cpu_times_pct"].get(11, 0),
                "js_running": js_cpu_stats["cpu_states"].get("Running", 0) / js_stage_time,
                "js_large_core": js_cpu_stats["cpu_times_pct"].get("l", 0),
                "js_mid_core": js_cpu_stats["cpu_times_pct"].get("m", 0),
                "ui_running": ui_cpu_stats["cpu_states"].get("Running", 0) / ui_stage_time,
                "ui_large_core": ui_cpu_stats["cpu_times_pct"].get("l", 0),
                "ui_mid_core": ui_cpu_stats["cpu_times_pct"].get("m", 0),
                "prealloc_run_pct": sum(prealloc_cpu_stats["cpu_times"].values()) / js_stage_time,
                "prealloc_large_core": prealloc_cpu_stats["cpu_times_pct"].get("l", 0),
                "prealloc_large_core": prealloc_cpu_stats["cpu_times_pct"].get("l", 0),
                "prealloc_mid_core": prealloc_cpu_stats["cpu_times_pct"].get("m", 0),
                "js_slice_count": js_slice_count,
                "ui_slice_count": ui_slice_count,
                "tot_slice_count": js_slice_count + ui_slice_count,
                # debug info
                "same_vsync": last_vsync["name"] == vsync_after_last_did_mount["name"],
                "start_time": (start_ts - trace_start_time) / 10**6,
                "finish_time": (finish_ts - trace_start_time) / 10**6,
                "js_end_time": (start_ts + js_stage_slice["dur"] - trace_start_time) / 10**6,
                "ui_start_time": (ui_start_time - trace_start_time) / 10**6,
                "alert_time": "" if alert_slice is None else (alert_slice["ts"] + alert_slice["dur"]) / 10**6,
                "last_slice_name": last_vsync["name"],
            }
            results.append(result)

        except Exception as e:
            import traceback

            print(f"Error processing {trace_path} ")
            print(traceback.format_exc())
            continue

    results = pd.DataFrame(results)

    if not processed.empty:
        results = pd.concat([results, processed], ignore_index=True)

    if len(results) == 0:
        print("No result to save")
        exit(0)

    results.to_csv(trace_fdr / "result.csv", index=False)

    scenarios = list(set(name[: name.rfind("@")] for name in results["trace_name"].tolist()))
    scenarios.sort()
    stats_agg = pd.DataFrame()
    brief_results = []
    to_excel = []

    with pd.ExcelWriter(trace_fdr / f"{trace_fdr.name}_result.xlsx") as writer:
        for scenario in scenarios:
            scenario_results = results[results["trace_name"].str.startswith(scenario)]
            scenario_results = scenario_results.sort_values(by="trace_name", ascending=True)
            numeric_cols = scenario_results.select_dtypes(include=["float64", "int64"]).columns

            stats = [
                {
                    "trace_name": "mean (not filtered)",
                    **{col: scenario_results[col].mean() for col in numeric_cols},
                },
                {
                    "trace_name": "(max - min) / mean (not filtered)",
                    **{
                        col: (scenario_results[col].max() - scenario_results[col].min()) / scenario_results[col].mean()
                        for col in numeric_cols
                    },
                },
                {
                    "trace_name": "sd",
                    **{col: scenario_results[col].std() for col in numeric_cols},
                },
                {
                    "trace_name": "sd/mean",
                    **{col: scenario_results[col].std() / scenario_results[col].mean() for col in numeric_cols},
                },
            ]

            if "full_time_trace" in scenario_results.columns:
                med = np.median(scenario_results["full_time_trace"])
                mad = np.median(np.abs(scenario_results["full_time_trace"] - med))
                mad_norm = mad * 1.4826
                lower_mad = med - 3 * mad_norm
                upper_mad = med + 3 * mad_norm
                scenario_results_filtered = scenario_results[
                    (scenario_results["full_time_trace"] >= lower_mad)
                    & (scenario_results["full_time_trace"] <= upper_mad)
                ]
                stats += [
                    {
                        "trace_name": "sd/mean (filtered)",
                        **{
                            col: scenario_results_filtered[col].std() / scenario_results_filtered[col].mean()
                            for col in numeric_cols
                        },
                    },
                    {
                        "trace_name": "mean (filtered)",
                        **{col: scenario_results_filtered[col].mean() for col in numeric_cols},
                    },
                ]

            stats_row = pd.DataFrame(stats)
            to_excel.append((scenario, pd.concat([scenario_results, stats_row], ignore_index=True)))

            space = pd.DataFrame(
                [
                    {col: "" for col in scenario_results.columns},
                    {col: "" for col in scenario_results.columns},
                    {
                        "trace_name": f"{len(scenario_results_filtered)} {scenario}",
                        **{col: "" for col in numeric_cols},
                    },
                ]
            )
            stats_agg = pd.concat([stats_agg, space, stats_row], ignore_index=True)
            parts = scenario.split("@")

            # view@20250313_203339@5463cb788a0c03560ea7b250a3afa90e09d50331@5.1.0.305@tester@hermes@rom-jsvm@ALN-AL00_51056_SP30DEVC00E56R4P1log@notrooted@27@HUAWEI_Mate_60_Pro@3BE6F0BD@03-21_12-37-12.htrace
            if len(parts) == 11:
                parts.insert(3, "")
            if len(parts) == 12:
                if parts[2] == "7e4166966306c256037986008cf034b5ba57c0e7":
                    parts[3] = "5.1.0.201"
                meta = {
                    "scenario": parts[0],
                    "rom": parts[7],
                    "js_engine": parts[5],
                    "git_tag": parts[3],
                    "commit_time": parts[1],
                    "commit_hash": parts[2],
                    "compiler": "bisheng" if parts[4] == "tester" else parts[4],
                    "run_group": parts[6],
                    "rooted": parts[8] == "rooted",
                    # "test_temperature": parts[9],
                    "phone_model": parts[10],
                    "phone_id": parts[11],
                }
            # image@example@text@ALN-AL00_500123_SP19C00E121R4P25log@rooted@27@HUAWEI_Mate_60_Pro@3BE6F0BD@04-11_07-10-05
            if len(parts) == 8:
                meta = {
                    "scenario": parts[0],
                    "rom": parts[3],
                    "run_group": parts[2],
                    "rooted": parts[4] == "rooted",
                    "test_temperature": parts[5],
                    "phone_model": parts[6],
                    "phone_id": parts[7],
                }
            else:
                meta = {"scenario": scenario}
            brief_results.append(
                {
                    **meta,
                    "group": scenario,
                    "sample_count": len(scenario_results_filtered),
                    **{col: scenario_results_filtered[col].mean() for col in numeric_cols},
                }
            )

            # first_three = scenario_results.sort_values(by="trace_name", ascending=True).head(3)
            # meta["scenario"] = f"{meta['scenario']}-3"
            # brief_results.append(
            #     {
            #         **meta,
            #         "group": scenario,
            #         "full_time_trace": first_three["full_time_trace"].mean(),
            #         "android_pct": first_three["android_pct"].mean(),
            #         "js_stage_time": first_three["js_stage_time"].mean(),
            #         "ui_stage_time": scenario_results_filtered["ui_stage_time"].mean(),
            #         "js_slice_count": scenario_results_filtered["js_slice_count"].mean(),
            #         "ui_slice_count": scenario_results_filtered["ui_slice_count"].mean(),
            #         "samples": len(first_three),
            #         "sd/mean": scenario_results_filtered["full_time_trace"].std()
            #         / scenario_results_filtered["full_time_trace"].mean(),
            #     }
            # )

        to_excel.insert(0, ("results", stats_agg))
        to_excel.insert(0, ("brief_result", pd.DataFrame(brief_results)))

        for name, df in to_excel:
            # Transpose DataFrame and write to Excel, keep original column names as first column
            df_transposed = df.transpose()
            df_transposed.columns = df_transposed.iloc[0]  # Use first row as column headers
            df_transposed = df_transposed.iloc[1:]  # Remove the first row since it's now column headers
            df_transposed.index.name = "Metric"  # Name the index column
            df_transposed.to_excel(writer, sheet_name=name)


if __name__ == "__main__":
    # experiment_name is supplied by the project's config module
    analyze(experiment_name)
