# scp -i /home/vllm/keypair-dwe-siye-001.pem 7.150.8.243:/root/workspace/ascend-vllm/logs/prefill_server.log ./prefill_server1.log
# scp -i /home/vllm/keypair-dwe-siye-001.pem 7.150.9.191:/root/workspace/ascend-vllm/logs/*.log ./

import re
import os
import sys
import ast
import numpy as np
import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import ScatterChart, Reference, Series
from openpyxl.chart.marker import Marker
from openpyxl.drawing.fill import PatternFillProperties, ColorChoice
from datetime import datetime
import colorsys
import random

# Maps the English action tag that appears in profiling log lines ("action:<tag>")
# to the Chinese column description used in the generated xlsx report.
# The values are written into the output file at runtime, so they are data,
# not comments -- do not translate or edit them casually.
# NOTE(review): keys must match the logger's exact text, including the
# misspellings 'Scend decode output token', 'chosse' and 'pullling'.
action_dict = {
    'Start to schedule':                                   '调度进程收到请求',
    'PNode':                                               'P节点',
    'DNode':                                               'D节点',
    'Prefill id':                                          'Prefill ID',
    'Finish to choose device and add request':             '选择设备，准备传递请求',
    'Start to send request to pd api server':              '准备发送请求给prefill api server',
    'PD api server get request':                           'prefill api server收到请求',
    'Prefill add waiting queue':                           'prefill 请求添加到waiting队列',
    'Get prefill engine request and start pickle':         '触发engine处理请求',
    'Start process request in prefill engine':             'engineD准备开始处理输入请求',
    'Finish process request in prefill engine':            'engine结束tokennizer',
    'try to schedule in waiting queue':                    '首次尝试加入running队列',
    'fail to add result of kv insufficient':               '首次kv不足加入失败',
    'fail to add result of can not schedule':              '首次seq长度超长加入失败',
    # 'Client put request in queue':                         'Prefill client 收到output结果',
    # 'Client get request from queue':                       'Prefill client 从队列中get结果',
    'Prefill get new_blocks':                              'P侧申请完成KV',
    'success add to seq groups':                           '成功加入running队列',
    'Finish prefill pickle and start response':            'api server收到请求准备返回',
    'Get response from pd api server':                     '调度进程收到api server返回的响应',
    'Finish to chosse device and start decode generate':   '调度进程选择decoder device 准备发送请求',
    'Finish to decode':                                    '调度进程完成发送',
    'Enter decode to generate':                            'decode api server收到请求准备处理',
    'Get decode engine request and start pickle':          '调用异步generate结束',
    'Start to dispatch decode request':                    '进入engine分发请求',
    'Add need pullling sequence':                          '添加到need pulling队列',
    'waiting_pull_len':                                    '添加后need pulling队列长度',
    'Start append running sequece for decode':             'pull kv结束添加到running队列',
    'Start to send output':                                '触发首个decode token执行',
    # 'Step in prapre input is driver workder':              '首次进入输入准备',
    'Second to send output':                               '触发第二个decode token执行',
    'Finish decode pickle and start response':             'api server收到推理结果',
    'Start engine step':                                   'engine step开始执行',
    'Prefill start execute_model':                         'P开始execute model',
    'Prefill done execute_model':                          'P完成execute model',
    'Finish engine step':                                  'engine step结束执行',
    'Batch num':                                           'prefill 组batch数量',
    'Token num':                                           'prefill 同批次tokens总数',
    'Client get prefill output':                           'Client收到输出token',
    'Pop output queues':                                   'Client异步队列出队',
    'Start to send output in prefill stage':               'engine开始发送输出结果',
    'Start pull kv':                                       '开始pull kv',
    'Finish pull kv':                                      '完成pull kv',
    'Prefill free kv blocks':                               'P侧释放KV',
    'Client get decode output':                            'decoder client收到请求开始推理',
    'Client get second decode output':                     'decoder client收到第二个token请求开始推理',
    'decoder last to send output':                         '触发最后一个decode token执行',
    'Client get last decode output':                       'decoder client收到最后一个token请求开始推理',
    'First decode output token':                           'decoder返回第一个token',
    'Scend decode output token':                           'decoder返回第二个token',
    'Get first token':                                     'proxy收到第一个token',
}
# Column headers (first row) of the "time_analysis" sheet. The order MUST
# stay in sync with the per-request field order built in
# save_to_time_analysis_detail(); each entry's trailing comment names the
# result-dict key it corresponds to. These strings are written to the xlsx
# at runtime -- they are data, not comments.
titles = [
'请求ID',                                          # req_id
'Prefill ID',                                        # Prefill id
"请求长度",                                          # Seq len
"decode输出的tokens",                                  # decode toknes number
"处理请求P节点ID",                                     # PNode
"处理请求D节点ID",                                     # Dnode
'调度进程收到请求',                                  # Start to schedule
'选择设备，准备传递请求',                               # Finish to choose device and add request
'准备发送请求给prefill api server',                     # Start to send request to pd api server
'prefill api server收到请求',                            # PD api server get request
'触发engine处理请求',                                  # Get prefill engine request and start pickle
'engine结束tokennizer',                               # Finish process request in prefill engine
'engine准备开始处理输入请求',                            # Start process request in prefill engine
'prefill 请求添加到waiting队列',                          # Prefill add waiting queue
'engine step开始执行',                                # Start engine step
'首次尝试加入running队列',                              # try to schedule in waiting queue
'首次kv不足加入失败',                                   # fail to add result of kv insufficient
'首次seq长度超长加入失败',                                  # fail to add result of can not schedule
'P侧申请完成KV',                                        # Prefill get new_blocks
'成功加入running队列',                                  # success add to seq groups
'P开始execute model',                                  # Prefill start execute_model
'P完成execute model',                                  # Prefill done execute_model
'engine step结束执行',                                  # Finish engine step
'engine异步发送输出',                                    # Start to send output in prefill stage #0613
'client收到输出并入队',                                  # Client get prefill output #0613
'client出队',                                        # Pop output queues #0613
'api server收到请求准备返回',                             # Finish prefill pickle and start response
'调度进程收到api server返回的响应',                      # Get response from pd api server
'调度进程选择decoder device 准备发送请求',               # Finish to chosse device and start decode generate
'调度进程完成发送',                                  # Finish to decode
'decode api server收到请求准备处理',                    # Enter decode to generate
'调用异步generate结束',                                 # Get decode engine request and start pickle
'进入engine分发请求',                                 # Start to dispatch decode request
'添加到need pulling队列',                                 # Add need pullling sequence
'添加后need pulling队列长度',                          # waiting_pull_len
'开始pull kv',                                        # Start pull kv #0613
'结束pull kv',                                        # Finish pull kv #0613
'P侧释放KV(和前后列时间戳可能存在时钟误差)',                                          # Prefill free kv blocks
'pull kv结束添加到running队列',                        # Start append running sequece for decode
'触发首个decode token执行',                           # Start to send output
# 'decoder client收到请求开始推理',                      # Client get decode output
# '触发第二个decode token执行',                            # Second to send output
# 'decoder client收到第二个token请求开始推理',         # Client get second decode output
# '触发最后一个decode token执行',                       # decoder last to send output
# 'decoder client收到最后一个token请求开始推理',          # Client get last decode output
'decoder返回第一个token',                            # First decode output token
'proxy收到第一个token',                              # Get first token
'decoder返回第二个token',                            # Scend decode output token
'api server收到推理结果'                              # Finish decode pickle and start response

]

def decode_worker_step(node, engine_core_str, lines_decode_worker_step):
    """Look up the recorded model step of a decode worker.

    Args:
        node: node tag parsed from the log file name (e.g. 'D0').
        engine_core_str: '<step_id>|<timestamp>' string emitted with the
            engine_step line, used together with the node as the lookup key.
        lines_decode_worker_step: mapping of '<node>|<engine_core_str>' to
            the integer model_step parsed from 'worker_step start' lines.

    Returns:
        The recorded model step (int), or None when no entry matches.
    """
    # dict.get() replaces the original "key in d.keys()" + index double
    # lookup; the old `worker_step : int = None` annotation was also wrong,
    # since None is a legitimate return value here.
    return lines_decode_worker_step.get(node + '|' + engine_core_str)

def parse_file(folder_path):
    """Parse all profiling log files under *folder_path* and aggregate them.

    Every regular file directly inside the folder (non-recursive) is scanned
    for three kinds of log lines:
      * 'CompletionMetric' lines  -> number of completion (decode) tokens
        per request.
      * 'engine_step start' lines -> per-step engine statistics; routed to
        the prefill or decode collection depending on the node tag embedded
        in the file name ('_NODE_P...' vs '_NODE_D...').
      * 'Times' action lines      -> per-request action timestamps keyed by
        the action tags in ``action_dict``.

    Returns:
        data1:        list of prefill engine-step records (mostly raw strings).
        data1_decode: dict '<node>_<pid>' -> list of decode engine-step records.
        result:       dict req_id -> {action/stat name: value}; actions that
                      never occurred are filled with the string 'NA'.
    """
    lines = []                      # prefill 'Times' / prefill engine_step raw lines
    lines_decode_step = []          # decode engine_step raw lines
    lines_decode_worker_step = {}   # '<node>|<engine_core_str>' -> model_step (int)
    metric = []                     # raw CompletionMetric lines
    for item in os.listdir(folder_path):
        item_path = os.path.join(folder_path, item)
        if os.path.isfile(item_path):  # only parse regular files
            print(item_path) # file being parsed
            with open(item_path, 'r', encoding='utf-8') as file:
                for line in file:
                    line = line.strip()
                    if not line:
                        continue
                    if 'CompletionMetric' in line:
                        metric.append(line)
                        continue
                    if 'profile' not in line:
                        continue
                    node_info = None
                    # File names look like '..._NODE_<node>_<pid>_...'; pull out <node>.
                    if '_NODE_' in item:
                        idx0 = item.find('_NODE_') + len('_NODE_')
                        idx1 = item.find('_', idx0)
                        node_info = item[idx0:idx1]
                    if 'Times' in line:
                        lines.append(line)
                    if 'engine_step start' in line:  # files containing 'engine_step start' are assumed to always carry _NODE_ info in their name
                        # NOTE(review): node_info would be None (AttributeError)
                        # if that assumption is violated -- verify log layout.
                        if node_info.startswith('P'):
                            lines.append(line)
                        else:
                            lines_decode_step.append(line)
                    
                    if node_info != None:
                        if 'Times' in line or node_info.startswith('P'):
                            # Tag the line just appended to `lines` with its node.
                            lines[-1] = lines[-1] + '|NODE=' + node_info + '.'
                        elif 'worker_step start' in line:
                            # profile: worker_step start:4790|1751437716.6964805|model_step=1097
                            id0 = line.find('worker_step start:')
                            id1 = line.find('|', id0)
                            id2 = line.find('|', id1+1)
                            d_step_key = node_info + '|' + line[id0 + len('worker_step start:'):id2]
                            worker_step = int(line[id2 + len('|model_step='):])
                            lines_decode_worker_step[d_step_key] = worker_step
                        else:
                            # Tag the last decode engine_step line with node and pid
                            # (pid is the second '_'-delimited token after '_NODE_').
                            # NOTE(review): any other 'profile' line from a D node
                            # also lands here and re-tags the last decode step
                            # line -- confirm this is intended.
                            lines_decode_step[-1] = lines_decode_step[-1] + '|NODE=' + node_info + '.'
                            idx0 = item.find('_NODE_') + len('_NODE_')
                            idx1 = item.find('_', idx0) + 1
                            idx0 = item.find('_', idx1)
                            pid = item[idx1:idx0]
                            lines_decode_step[-1] = lines_decode_step[-1] + '|PID=' + pid + '.'

    print(f'read all files done')

    data1 = [] # req_ids : start engine time, end engine time, total tokens of the same batch
    data1_decode = {}
    data1_spread = {} # req_id -> (prefill_id, start engine time, end engine time, ..., total tokens of the same batch)
    data2 = {} # req_id : timestamp per action
    data3 = {} # req_id : number of decode tokens
    # --- CompletionMetric lines: extract req_id and completion token count ---
    for line in metric:
        if 'CompletionMetric' in line:
            # print(f"{line=}")
            idx0 = line.find('profile REQ_ID[chatcmpl-') + len('profile REQ_ID[chatcmpl-')
            idx1 = line.find(']', idx0)
            req_id = line[idx0:idx1]
            idx0 = line.find('num_completion_tokens=') + len('num_completion_tokens=')
            idx1 = line.find('.', idx0)
            output_tokens = line[idx0:idx1]
            # print(f"{output_tokens=}")
            data3[req_id] = int(output_tokens)
    print(f'parse CompletionMetric done')

    # --- decode engine_step lines: positional extraction of each '|name=' field ---
    for line in lines_decode_step:
        id = 0

        if 'engine_step start' in line:
            # print(f"{line=}")
            id0 = line.find('engine_step start:')
            id1 = line.find('|finish:', id0)
            start_timestamp = line[id0 + len('engine_step start:'):id1]
            start_timestamp = float(start_timestamp)
            id0 = line.find('|finish:')
            id1 = line.find('|execute time:', id0)
            finish_timestamp = line[id0 + len('|finish:'):id1]
            id0 = line.find('|execute time:')
            id1 = line.find('|seqs:', id0)
            execute_time = line[id0 + len('|execute time:'):id1]
            id0 = line.find('|seqs:')
            id1 = line.find('|tokens:', id0)
            seqs = line[id0 + len('|seqs:'):id1]
            id0 = line.find('|tokens:')
            id1 = line.find('|waiting_reqs_num_after_step=', id0)
            tokens = line[id0 + len('|tokens:'):id1]
            # skip idle steps that processed no tokens
            if int(tokens) == 0:
                continue
            id0 = line.find('|waiting_reqs_num_after_step=')
            id1 = line.find('|reqs_ids=', id0)
            waiting_num = line[id0 + len('|waiting_reqs_num_after_step='):id1]
            id0 = line.find('|reqs_ids=')
            id1 = line.find('|bs_tokens=', id0)
            reqs = line[id0 + len('|reqs_ids='):id1]
            # UUID4 pattern used to extract request ids from the reqs field
            pattern = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
            uuids_reqs = re.findall(pattern, reqs)
            # print(f"{uuids_reqs=}")
            id0 = line.find('|bs_tokens=')
            id1 = line.find('|execute_model_start_time=', id0)
            tokens_per_req = ast.literal_eval(line[id0 + len('|bs_tokens='):id1])
            id0 = line.find('|execute_model_start_time=')
            id1 = line.find('|execute_model_end_time=', id0)
            model_start_timestamp = line[id0 + len('|execute_model_start_time='):id1]
            id0 = line.find('|execute_model_end_time=')
            id1 = line.find('|execute_model_cost_time=', id0)
            model_finish_timestamp = line[id0 + len('|execute_model_end_time='):id1]
            id0 = line.find('|execute_model_cost_time=')
            id1 = line.find('|kv_cache_usage=', id0)
            model_execute_time = line[id0 + len('|execute_model_cost_time='):id1]
            id0 = line.find('|kv_cache_usage=')
            id1 = line.find('|kv_blocks_num=', id0)
            kv_cache_usage = line[id0 + len('|kv_cache_usage='):id1]
            kv_cache_usage = float(kv_cache_usage)
            id0 = line.find('|kv_blocks_num=')
            id1 = line.find('|start_free_block_num=', id0)
            kv_blocks_num = line[id0 + len('|kv_blocks_num='):id1]
            id0 = line.find('|start_free_block_num=')
            id1 = line.find('|end_free_block_num=', id0)
            start_free_block_num = line[id0 + len('|start_free_block_num='):id1]
            id0 = line.find('|end_free_block_num=')
            id1 = line.find('|cost_blocks_num=', id0)
            end_free_block_num = line[id0 + len('|end_free_block_num='):id1]
            id0 = line.find('|cost_blocks_num=')
            id1 = line.find('|engine_core_str=', id0)
            cost_blocks_num = line[id0 + len('|cost_blocks_num='):id1]
            id0 = line.find('|engine_core_str=')
            id1 = line.find('|NODE=', id0)
            engine_core_str = line[id0 + len('|engine_core_str='):id1]
            # NODE/PID tags were appended during file scanning above
            id0 = line.find('|NODE=')
            id1 = line.find('.', id0)
            node = line[id0 + len('|NODE='):id1]
            id0 = line.find('|PID=', id1)
            id1 = line.find('.', id0)
            pid = line[id0 + len('|PID='):id1]
            worker_step = decode_worker_step(node, engine_core_str, lines_decode_worker_step)
            # worker_step stays None when no matching worker_step line was seen;
            # the record is still appended with None in the last slot.
            if worker_step is None:
                print(f'not find worker_step for [{line}]')
            dict_key = node + '_' + pid
            if dict_key not in data1_decode.keys():
                data1_decode[dict_key] = []
            data1_decode[dict_key].append([start_timestamp, finish_timestamp, execute_time, seqs, int(tokens), waiting_num, \
                float(model_start_timestamp), float(model_finish_timestamp), float(model_execute_time), kv_cache_usage, kv_blocks_num, start_free_block_num, \
                end_free_block_num, cost_blocks_num, worker_step])
            id += 1
    print(f'parse decode_step done')

    # --- prefill engine_step + per-request 'Times' action lines ---
    for line in lines:
        id = 0

        if 'engine_step start' in line:
            # Same positional field extraction as the decode loop above, but
            # most values are kept as raw strings and the request-id list /
            # per-request token list are retained for data1_spread.
            # print(f"{line=}")
            id0 = line.find('engine_step start:')
            id1 = line.find('|finish:', id0)
            start_timestamp = line[id0 + len('engine_step start:'):id1]
            start_timestamp = float(start_timestamp)
            id0 = line.find('|finish:')
            id1 = line.find('|execute time:', id0)
            finish_timestamp = line[id0 + len('|finish:'):id1]
            id0 = line.find('|execute time:')
            id1 = line.find('|seqs:', id0)
            execute_time = line[id0 + len('|execute time:'):id1]
            id0 = line.find('|seqs:')
            id1 = line.find('|tokens:', id0)
            seqs = line[id0 + len('|seqs:'):id1]
            id0 = line.find('|tokens:')
            id1 = line.find('|waiting_reqs_num_after_step=', id0)
            tokens = line[id0 + len('|tokens:'):id1]
            # skip idle steps that processed no tokens
            if int(tokens) == 0:
                continue
            id0 = line.find('|waiting_reqs_num_after_step=')
            id1 = line.find('|reqs_ids=', id0)
            waiting_num = line[id0 + len('|waiting_reqs_num_after_step='):id1]
            id0 = line.find('|reqs_ids=')
            id1 = line.find('|bs_tokens=', id0)
            reqs = line[id0 + len('|reqs_ids='):id1]
            pattern = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
            uuids_reqs = re.findall(pattern, reqs)
            # print(f"{uuids_reqs=}")
            id0 = line.find('|bs_tokens=')
            id1 = line.find('|execute_model_start_time=', id0)
            tokens_per_req = ast.literal_eval(line[id0 + len('|bs_tokens='):id1])
            id0 = line.find('|execute_model_start_time=')
            id1 = line.find('|execute_model_end_time=', id0)
            model_start_timestamp = line[id0 + len('|execute_model_start_time='):id1]
            id0 = line.find('|execute_model_end_time=')
            id1 = line.find('|execute_model_cost_time=', id0)
            model_finish_timestamp = line[id0 + len('|execute_model_end_time='):id1]
            id0 = line.find('|execute_model_cost_time=')
            id1 = line.find('|kv_cache_usage=', id0)
            model_execute_time = line[id0 + len('|execute_model_cost_time='):id1]
            id0 = line.find('|kv_cache_usage=')
            id1 = line.find('|kv_blocks_num=', id0)
            kv_cache_usage = line[id0 + len('|kv_cache_usage='):id1]
            kv_cache_usage = float(kv_cache_usage)
            id0 = line.find('|kv_blocks_num=')
            id1 = line.find('|start_free_block_num=', id0)
            kv_blocks_num = line[id0 + len('|kv_blocks_num='):id1]
            id0 = line.find('|start_free_block_num=')
            id1 = line.find('|end_free_block_num=', id0)
            start_free_block_num = line[id0 + len('|start_free_block_num='):id1]
            id0 = line.find('|end_free_block_num=')
            id1 = line.find('|cost_blocks_num=', id0)
            end_free_block_num = line[id0 + len('|end_free_block_num='):id1]
            id0 = line.find('|cost_blocks_num=')
            id1 = line.find('|engine_core_str=', id0)
            cost_blocks_num = line[id0 + len('|cost_blocks_num='):id1]
            id0 = line.find('|engine_core_str=')
            id1 = line.find('|NODE=', id0)
            engine_core_str = line[id0 + len('|engine_core_str='):id1]
            id0 = line.find('|NODE=')
            id1 = line.find('.', id0)
            node = line[id0 + len('|NODE='):id1]
            data1.append([start_timestamp, finish_timestamp, execute_time, seqs, tokens, waiting_num, uuids_reqs, \
                            tokens_per_req, model_start_timestamp, model_finish_timestamp, model_execute_time, \
                            kv_cache_usage, kv_blocks_num, start_free_block_num, end_free_block_num, cost_blocks_num, node])
            # print(f"{data1[-1]=}")
            id += 1

        if 'Times' in line:
            # Per-request action line:
            #   profile REQ_ID[<id>] ... action:<tag>.Timestamp <ts>|...
            # print(f"{line=}")
            first_index = line.index('profile REQ_ID[')
            second_index = line.index(']', first_index)
            fifth_index = line.index(' action:', second_index)
            sixth_index = line.index('.', fifth_index)
            seventh_index = line.index('Timestamp ', sixth_index)
            last_index = line.index('|', seventh_index)

            # time_str = line[third_index + len('time['):forth_index].strip()
            action = line[fifth_index + len(' action:'):sixth_index].strip()
            timestamp = float(line[seventh_index + len('Timestamp '):last_index].strip())
            first_index = first_index + len('profile REQ_ID[')
            # strip the optional 'chatcmpl-' prefix from the request id
            if 'chatcmpl-' in line[first_index:second_index]:
                first_index += len('chatcmpl-')
            req_id  = line[first_index:second_index]
            # print(f"{req_id=}")
            d_node = None
            # D-node tag only exists on lines tagged during the file scan
            if 'NODE=D' in line:
                id0 = line.find('NODE=')
                id1 = line.find('.', id0)
                d_node = line[id0 + len('NODE='):id1]

            if 'Start pull kv at' in action:
                # 'Start pull kv at <ts> , cost ...' -- the start timestamp is
                # embedded in the action text itself; the line timestamp is the
                # finish time.
                point1 = line.find('Start pull kv at ')
                point2 = line.find(' , cost', point1)
                # NOTE(review): len('Start pull kv at  ')-1 equals
                # len('Start pull kv at ') -- presumably a leftover double
                # space; verify against the logger format.
                start_timestamp = float(line[point1 + len('Start pull kv at  ')-1:point2].strip())
                if req_id not in data2:
                    data2[req_id] = {'Start pull kv': [start_timestamp], 'Finish pull kv': [timestamp]}
                else:
                    data2[req_id].update({'Start pull kv': [start_timestamp]})
                    data2[req_id].update({'Finish pull kv': [timestamp]})
            elif 'Add need pullling sequence' in action:
                # profile REQ_ID[chatcmpl-10e782ed-b57c-4e86-84da-d3b017cccba8] action:Add need pullling sequence|waiting_pull_len=1.Timestamp 1750948979.6645474
                values = action.split('|')
                action = values[0]
                waiting_pull_len_info = values[1].split('=')
                # print(f"{action=}, {timestamp=}, {waiting_pull_len_info[0]=}, {waiting_pull_len_info[1]=}")
                if req_id not in data2:
                    data2[req_id] = {action: [timestamp], waiting_pull_len_info[0]: [waiting_pull_len_info[1]]}
                else:
                    data2[req_id].update({action: [timestamp]})
                    data2[req_id].update({waiting_pull_len_info[0]: [waiting_pull_len_info[1]]})
            else:
                if action not in action_dict.keys(): # these two measurement points are still to be completed
                    continue
                if req_id not in data2:
                    data2[req_id] = {action:[]}
                    data2[req_id][action].append(timestamp)
                else:
                    # NOTE(review): DNode is only recorded when the request has
                    # been seen before -- a d_node on the very first action of a
                    # request is dropped; confirm whether that is intended.
                    if action not in data2[req_id]:
                        data2[req_id].update({action:[]})
                    data2[req_id][action].append(timestamp)
                    if d_node is not None:
                        data2[req_id].update({'DNode': [d_node]})
    print(f'parse and prefill action timestamp done')


    # Spread data1 out per request: one entry per request id found in a step.
    for i, data in enumerate(data1):
        for id, req_id in enumerate(data[6]):
            # start_timestamp, finish_timestamp, execute_time, seqs, tokens, waiting_num, uuids_reqs, tokens_per_req, node
            data1_spread[req_id] = (i, data[0], data[1], data[2], data[3], data[4], data[5], data[7][id], data[-1])

    # actions = set()
    # for req, data in data2.items():
    #     actions.update(data.keys())
    # print(actions)
    # exit()
    # Collapse each action's timestamp list to its earliest occurrence and
    # fill every action that never happened with 'NA'.
    result = {}
    for req_id, data in data2.items():
        result[req_id] = {}
        for miss_key in action_dict.keys() - data.keys():
            result[req_id].update({miss_key: 'NA'})
        for action,time_list in data.items():
            result[req_id][action] = min(time_list)
            # if action == 'Start to send output':
            #     result[req_id]['Second to send output'] = 0 #time_list[1] # multi step = 4: time point after processing 4 tokens
            #     result[req_id]['decoder last to send output'] = time_list[-1]  # multi step = 4: time point after processing 4 tokens
            # if action == 'Client get prefill output':
            #     result[req_id]['Client get decode output'] = time_list[1] # multi step = 4: time point after processing 4 tokens
            #     result[req_id]['Client get second decode output'] = time_list[2]  # multi step = 4: time point after processing 4 tokens
            #     result[req_id]['Client get last decode output'] = time_list[-1]  # multi step = 4: time point after processing 4 tokens
    # Merge data1_spread (engine-step info) and data3 (decode token counts)
    # into the per-request result.
    # print(f"{data3.keys()=}")
    for req_id, data in result.items():
        if req_id in data1_spread:
            result[req_id].update({"Prefill id":data1_spread[req_id][0]})
            result[req_id].update({"Start engine step":data1_spread[req_id][1]})
            result[req_id].update({"Finish engine step":data1_spread[req_id][2]})
            result[req_id].update({"Batch num":int(data1_spread[req_id][4])})
            result[req_id].update({"Token num":int(data1_spread[req_id][5])}) # total token count of the engine step
            result[req_id].update({"Seq len": int(data1_spread[req_id][7])})
            result[req_id].update({"PNode":(data1_spread[req_id][8])})
        else:
            result[req_id].update({"Prefill id":'NA'})
            result[req_id].update({"Start engine step":'NA'})
            result[req_id].update({"Finish engine step":'NA'})
            result[req_id].update({"Batch num":'NA'})
            result[req_id].update({"Token num":'NA'})
            result[req_id].update({"Seq len": 'NA'})
            result[req_id].update({"PNode": 'NA'})
        # print(f"{req_id=}")
        if req_id in data3.keys():
            result[req_id].update({"decode token number": data3[req_id]})
        else:
            result[req_id].update({"decode token number": 'NA'})

    return data1, data1_decode, result

def save_to_time_analysis_detail(result, output_path="time_analysis.xlsx"):
    """Write one row per request into a 'time_analysis' xlsx sheet.

    The header row is the module-level ``titles`` list; each data row is
    the request id followed by the request's stats in a fixed key order.
    Requests whose 'Seq len' is 'NA' (only present in the nginx log, never
    in the engine logs) are skipped.
    """
    # Fixed column order -- must stay in sync with ``titles``.
    ordered_keys = (
        'Prefill id',
        'Seq len',
        'decode token number',
        'PNode',
        'DNode',
        'Start to schedule',
        'Finish to choose device and add request',
        'Start to send request to pd api server',
        'PD api server get request',
        'Get prefill engine request and start pickle',
        'Finish process request in prefill engine',
        'Start process request in prefill engine',
        'Prefill add waiting queue',
        'Start engine step',
        'try to schedule in waiting queue',
        'fail to add result of kv insufficient',
        'fail to add result of can not schedule',
        'Prefill get new_blocks',
        'success add to seq groups',
        'Prefill start execute_model',
        'Prefill done execute_model',
        'Finish engine step',
        'Start to send output in prefill stage',  # 0613
        'Client get prefill output',  # 0613
        'Pop output queues',  # 0613
        'Finish prefill pickle and start response',
        'Get response from pd api server',
        'Finish to chosse device and start decode generate',
        'Finish to decode',
        'Enter decode to generate',
        'Get decode engine request and start pickle',
        'Start to dispatch decode request',
        'Add need pullling sequence',
        'waiting_pull_len',
        'Start pull kv',  # 0613
        'Finish pull kv',  # 0613
        'Prefill free kv blocks',
        'Start append running sequece for decode',
        'Start to send output',
        'First decode output token',
        'Get first token',
        'Scend decode output token',
        'Finish decode pickle and start response',
    )
    workbook = Workbook()
    sheet = workbook.active
    sheet.title = "time_analysis"
    sheet.append(titles)
    for req_id, stats in result.items():
        if stats['Seq len'] == 'NA':
            # print(f"skip req [{req_id}] only in nginx log")
            continue
        sheet.append([req_id] + [stats[key] for key in ordered_keys])
    workbook.save(output_path)
    print(f"已保存结果到 {output_path}")

def save_to_engine_step_detail_prefill(result, data_ws, start_time):
    """Write prefill engine-step records into the 'engine_step' worksheet.

    Records are ordered by step start timestamp (column 0); any record that
    starts before *start_time* is discarded (warm-up cutoff). Every cell is
    stringified before being appended.
    """
    header = [
        'Engine step开始时间', 'Engine step结束时间', '执行时间(ms)', 'Seq数量',
        'Token数量', '处理完成后waiting队列长度', 'reqids', 'tokens per req',
        '模型开始时间', '模型结束时间', '模型执行时间(ms)', 'kv usage',
        'kv block总数', 'step初始空闲block数', 'step结束空闲block数',
        'step新增使用block数', '节点',
    ]
    data_ws.title = "engine_step"

    # Chronological order, then drop everything before the cutoff; on a
    # sorted list this is equivalent to deleting the leading slice.
    kept_rows = [row for row in sorted(result, key=lambda row: row[0])
                 if row[0] >= start_time]

    data_ws.append(header)
    for row in kept_rows:
        data_ws.append([str(cell) for cell in row])

def save_to_engine_step_detail_decode(result_decode, decode_data_ws):
    """Write decode engine-step records into a worksheet, grouped by die.

    *result_decode* maps '<node>_<pid>' keys to lists of step records; the
    key becomes the first cell of every row. ``None`` cells are written
    through unchanged, everything else is stringified.
    """
    header = [
        '节点_die', 'Engine step开始时间', 'Engine step结束时间', '执行时间(ms)',
        'Seq数量', 'Token数量', '处理完成后waiting队列长度', '模型开始时间',
        '模型结束时间', '模型执行时间(ms)', 'kv usage', 'kv block总数',
        'step初始空闲block数', 'step结束空闲block数', 'step新增使用block数',
        'step轮次',
    ]
    decode_data_ws.append(header)
    for die_key, steps in result_decode.items():
        for step in steps:
            row = [die_key]
            row.extend(None if cell is None else str(cell) for cell in step)
            decode_data_ws.append(row)

def save_to_engine_step_decode_die_load(result_decode, decode_die_load_ws):
    """Write a wide per-die load sheet: for every die, eight columns of
    selected engine-step fields, then the columns transposed into rows.

    result_decode: dict mapping die key -> list of engine-step rows; rows
        are indexed positionally (see `columns` below for which indices
        are extracted).
    decode_die_load_ws: openpyxl worksheet to append into.
    """
    # (column-name suffix, row index) pairs, in output-column order.
    # Replaces eight hand-unrolled name/list/append triplets.
    columns = [
        ('_step轮次', 14),
        ('_模型开始时间', 6),
        ('_Seq数量', 3),
        ('_Token数量', 4),
        ('_处理完成后waiting队列长度', 5),
        ('_模型执行时间(ms)', 8),
        ('_step结束空闲block数', 12),
        ('_step新增使用block数', 13),
    ]
    out_dict = {}
    for key, value in result_decode.items():
        for suffix, idx in columns:
            out_dict[key + suffix] = [r[idx] for r in value]
    # First row: column names.
    decode_die_load_ws.append(list(out_dict.keys()))
    # Following rows: transpose the column lists into sheet rows.
    for row in zip(*out_dict.values()):
        decode_die_load_ws.append(row)

def calc_decode_die_time_line_cv(k, tokens, timestamp, result_decode) -> float:
    """Measure cross-die token-count imbalance around one point in time.

    Starting from die `k`'s own token count, collect the token count of
    every other die at (roughly) the same model-start timestamp — a sample
    is accepted only if it lies within 0.1s of `timestamp`.  Returns the
    ratio of the most-deviating sample (max or min, whichever is farther
    from the mean) to the mean of all collected samples.
    """
    time_diff = 0.1
    samples = [tokens]
    for key, rows in result_decode.items():
        if key == k:
            continue
        for i in range(1, len(rows)):
            prev_t = rows[i - 1][6]
            cur_t = rows[i][6]
            if cur_t <= timestamp:
                continue
            # First row whose start time passes `timestamp`: pick whichever
            # neighbour (previous or current) is within the window, else skip.
            if timestamp > prev_t and timestamp - prev_t < time_diff:
                samples.append(rows[i - 1][4])
            elif cur_t - timestamp < time_diff:
                samples.append(rows[i][4])
            else:
                print(f"not find for {k} at {timestamp} from {key}")
            break
        # If no close-enough sample was found for this die, nothing is added.

    mean_value = np.mean(samples)
    # Pick whichever extreme (max or min) deviates more from the mean.
    hi = np.max(samples)
    lo = np.min(samples)
    extreme = lo if (mean_value - lo) > (hi - mean_value) else hi
    return float(extreme / mean_value)

def generate_distinct_colors(n=64, saturation_min=0.5, saturation_max=0.9,
                             value_min=0.6, value_max=1.0):
    """Return `n` hex color strings with evenly spaced hues.

    Hue is deterministic (i/n); saturation and value are drawn uniformly
    from the given ranges, so repeated calls differ unless the `random`
    module is seeded.
    """
    def _color_at(step):
        # Two random draws per color, in this order (saturation first).
        sat = random.uniform(saturation_min, saturation_max)
        val = random.uniform(value_min, value_max)
        red, green, blue = colorsys.hsv_to_rgb(step / n, sat, val)
        return '#%02x%02x%02x' % (
            int(red * 255),
            int(green * 255),
            int(blue * 255)
        )

    return [_color_at(i) for i in range(n)]

def save_to_engine_step_decode_die_time(result_decode, steps, ws):
    """Write per-die time/token columns plus per-step Max/Avg statistics,
    then embed two scatter charts into the worksheet.

    result_decode: dict die_key -> list of step rows; row index 4 is the
        token count, index 6 the model-start timestamp, index 14 the step
        number.  Rows may be all-None placeholders for missing steps.
    steps: sorted list of all step numbers seen across dies.
    ws: openpyxl worksheet to fill; charts are anchored at L6 and L25.
    """
    # Common start time: the earliest model-start timestamp over all dies.
    # Sentinel is far in the future (year 2125) so min() always picks real data.
    s_time = 4906457181.437  # 2125-06-25 00:44:51.437
    for value in result_decode.values():
        if value[0][6] is not None:
            s_time = min(s_time, value[0][6])
    print(f"{s_time=}")

    out_dict = {}
    tokens_plot = {}
    step_token_dict = {}
    tokens_max = []
    tokens_mean = []
    max_div_mean = []
    # Group token counts by step number, skipping placeholder rows.
    for value in result_decode.values():
        for r in value:
            if r[4] is None:
                continue
            step_token_dict.setdefault(r[14], []).append(r[4])
    # Per-step max / mean / ratio across dies.  (Avoid shadowing the
    # builtins `max`/`min` as the original code did.)
    for step in steps:
        step_max = np.max(step_token_dict[step])
        step_mean = np.mean(step_token_dict[step])
        tokens_max.append(step_max)
        tokens_mean.append(step_mean)
        max_div_mean.append(float(step_max / step_mean))
    out_dict['step轮次'] = steps
    out_dict['Token数量(Max)'] = tokens_max
    out_dict['Token数量(Avg)'] = tokens_mean
    out_dict['Token数量(Max/Avg)'] = max_div_mean

    # Per-die columns: model-start time relative to s_time, and token count.
    for key, value in result_decode.items():
        time_col = []
        tokens_col = []
        for r in value:
            time_col.append(None if r[6] is None else r[6] - s_time)
            tokens_col.append(r[4])
        out_dict[key + '_模型开始时间'] = time_col
        out_dict[key + '_Token数量'] = tokens_col
        tokens_plot[key] = {'x': time_col, 'y': tokens_col}

    # First row: column names; following rows: transposed column data.
    ws.append(list(out_dict.keys()))
    for row in zip(*out_dict.values()):
        ws.append(row)

    # Re-read the sheet into a DataFrame so column positions can be looked
    # up by name when building chart references.
    data = list(ws.values)
    if not data:
        raise ValueError("工作表中没有数据。")
    df = pd.DataFrame(data[1:], columns=data[0])

    # Scatter chart 1: per-die token count vs step sequence.
    chart_tokens = ScatterChart()
    chart_tokens.title = 'step轮次 vs Token数量'
    chart_tokens.style = 13
    chart_tokens.x_axis.title = 'step轮次'
    chart_tokens.y_axis.title = 'Token数量'

    for model_name, model_data in tokens_plot.items():
        x_col = 'step轮次'
        y_col = model_name + '_Token数量'
        x_range = Reference(ws, min_col=df.columns.get_loc(x_col)+1, min_row=2, max_row=len(steps)+1)
        y_range = Reference(ws, min_col=df.columns.get_loc(y_col)+1, min_row=2, max_row=len(model_data['y'])+1)

        s = Series(y_range, x_range, title=model_name)
        marker = Marker()
        marker.symbol = 'circle'
        marker.size = 7
        # Scatter only: suppress the connecting line.
        s.graphicalProperties.line.noFill = True
        s.marker = marker
        chart_tokens.series.append(s)
    ws.add_chart(chart_tokens, "L6")

    # Scatter chart 2: per-step Token数量(Max/Avg) ratio.
    chart_tokens_max = ScatterChart()
    chart_tokens_max.title = "step轮次 vs Token数量(Max/Avg)"
    chart_tokens_max.style = 13
    chart_tokens_max.x_axis.title = "step轮次"
    chart_tokens_max.y_axis.title = "Token数量(Max/Avg)"
    chart_tokens_max.legend = None

    x_col = 'step轮次'
    y_col = 'Token数量(Max/Avg)'
    x_range = Reference(ws, min_col=df.columns.get_loc(x_col)+1, min_row=2, max_row=len(steps)+1)
    y_range = Reference(ws, min_col=df.columns.get_loc(y_col)+1, min_row=2, max_row=len(steps)+1)

    s = Series(y_range, x_range, title='Token数量(Max/Avg)')
    marker = Marker()
    marker.symbol = 'circle'
    marker.size = 7
    # Scatter only: suppress the connecting line.
    s.graphicalProperties.line.noFill = True
    s.marker = marker
    chart_tokens_max.series.append(s)
    ws.add_chart(chart_tokens_max, "L25")


def save_to_engine_step_detail(result, result_decode, start_time, start_step, output_path="engine_step.xlsx"):
    """Build the engine-step workbook and save it.

    Creates four sheets: prefill engine steps, per-die decode engine steps,
    per-die load columns, and per-die time/token charts.

    result: prefill engine-step rows (row[0] = start timestamp).
    result_decode: dict die_key -> decode step rows (row[0] = timestamp,
        row[14] = step number).  Mutated in place: sorted, trimmed to
        start_time, and padded with placeholder rows for missing steps.
    start_time: epoch seconds; rows before this are dropped (ramp-up skip).
    start_step: kept for interface compatibility; currently unused.
    output_path: destination .xlsx file.
    """
    wb = Workbook()
    data_ws = wb.active

    save_to_engine_step_detail_prefill(result, data_ws, start_time)
    print(f"写入 {data_ws.title} 完成")

    # Sort each die's rows by timestamp, then drop rows before start_time.
    for key, value in result_decode.items():
        result_decode[key] = sorted(value, key=lambda x: x[0])
    for value in result_decode.values():
        drop = sum(1 for r in value if r[0] < start_time)
        del value[:drop]

    decode_data_ws = wb.create_sheet(title="engine_step_decode")
    save_to_engine_step_detail_decode(result_decode, decode_data_ws)
    print(f"写入 {decode_data_ws.title} 完成")

    # Collect all step numbers and record which steps each die actually has,
    # so missing steps can be padded per die.  A seen-set keeps the global
    # step collection O(1) per lookup instead of the original O(n) list scan.
    steps = []
    seen_steps = set()
    die_steps_dict = {}
    for key, value in result_decode.items():
        die_steps = []
        for row in value:
            step = row[14]
            if die_steps and step == die_steps[-1]:
                print(f'step {step} duplicated in {key}')
            else:
                die_steps.append(step)
            if step not in seen_steps:
                seen_steps.add(step)
                steps.append(step)
        die_steps_dict[key] = die_steps
    steps.sort()

    # For every die that is missing a step, insert a placeholder row (all
    # None except the step number at index 14) at the right sorted position
    # so per-die columns stay aligned across sheets.
    die_step_sets = {key: set(v) for key, v in die_steps_dict.items()}
    for step in steps:
        for key, have in die_step_sets.items():
            if step not in have:
                none_list = [None] * 15
                none_list[14] = step
                inserted = False
                for i in range(len(result_decode[key])):
                    if result_decode[key][i][14] > step:
                        result_decode[key].insert(i, none_list)
                        inserted = True
                        break
                if not inserted:
                    result_decode[key].append(none_list)

    decode_die_load_ws = wb.create_sheet(title="Decode_die_load")
    save_to_engine_step_decode_die_load(result_decode, decode_die_load_ws)
    print(f"写入 {decode_die_load_ws.title} 完成")

    decode_die_load_time_ws = wb.create_sheet(title="Decode_die_time")
    save_to_engine_step_decode_die_time(result_decode, steps, decode_die_load_time_ws)
    print(f"写入 {decode_die_load_time_ws.title} 完成")

    wb.save(output_path)
    print(f"已保存结果到 {output_path}")

if __name__ == "__main__":
    # Usage: script.py <log dir> <start timestamp "YYYY-mm-dd HH:MM:SS.fff">
    if len(sys.argv) < 3:
        print("请提供解析目录和时间戳") # e.g. 2025-06-25 00:44:51.437
        sys.exit(-1)

    print(f"log path: {sys.argv[1]}, {sys.argv[2]}")
    folder_path, timestamp_str = sys.argv[1], sys.argv[2]
    start_time = datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").timestamp()
    # Skip roughly the ramp-up period after the given timestamp.
    start_time += 90
    data1, data1_decode, result = parse_file(folder_path)
    print(f"{start_time=}")
    print(f"{len(data1)=}")
    print(f"{len(data1_decode)=}")
    print(f"{len(result)=}")
    save_to_time_analysis_detail(result)
    start_step = 0
    save_to_engine_step_detail(data1, data1_decode, start_time, start_step)
