import matplotlib.pyplot as plt
import re

def parse_training_log(log_content):
    """Parse a Megatron/MindSpeed-style training log and extract key metrics.

    Args:
        log_content: iterable of log lines (e.g. the list returned by
            ``f.readlines()``); lines that do not match are skipped.

    Returns:
        dict of parallel lists keyed by 'iterations', 'lm_loss',
        'learning_rate', 'elapsed_time' (ms), 'throughput'
        (TFLOP/s/GPU) and 'grad_norm' — one entry per matched line.
    """
    # Compile once and split the pattern for readability; matches lines like:
    # [ts] iteration 34/ 2000 | consumed samples: 544 | elapsed time per
    # iteration (ms): 3081.4 | ... | grad norm: 60.009 | ...
    pattern = re.compile(
        r"\[.*?\]\s+iteration\s+(\d+)/\s*\d+\s*\|"
        r"\s+consumed samples:\s*\d+\s*\|"
        r"\s+elapsed time per iteration \(ms\):\s*([\d.]+)\s*\|"
        r"\s+throughput per GPU \(TFLOP/s/GPU\):\s*([\d.]+)\s*\|"
        r"\s+learning rate:\s*([\d.Ee+-]+)\s*\|"
        r"\s+global batch size:\s*\d+\s*\|"
        r"\s+lm loss:\s*([\d.Ee+-]+)\s*\|"
        r"\s+loss scale:\s*[\d.]+\s*\|"
        r"\s+grad norm:\s*([\d.]+)"
    )

    iterations = []
    lm_loss_values = []
    learning_rate_values = []
    elapsed_time_values = []
    throughput_values = []
    grad_norm_values = []

    for line in log_content:
        match = pattern.search(line)
        if match:
            iterations.append(int(match.group(1)))
            elapsed_time_values.append(float(match.group(2)))
            throughput_values.append(float(match.group(3)))
            learning_rate_values.append(float(match.group(4)))
            lm_loss_values.append(float(match.group(5)))
            grad_norm_values.append(float(match.group(6)))

    return {
        'iterations': iterations,
        'lm_loss': lm_loss_values,
        'learning_rate': learning_rate_values,
        'elapsed_time': elapsed_time_values,
        'throughput': throughput_values,
        'grad_norm': grad_norm_values
    }

def plot_training_metrics(data, save_path='./log.png'):
    """Plot the training metric curves and save them as one PNG.

    Draws loss, learning rate, per-iteration time, throughput and grad
    norm against iteration number in a 2x3 grid (one panel unused).

    Args:
        data: dict of parallel lists as produced by parse_training_log.
        save_path: output path for the saved PNG image.
    """
    fig, axes = plt.subplots(2, 3, figsize=(16, 8), sharex=True)
    axes = axes.reshape(-1)

    # Elapsed time is logged in milliseconds; plot it in seconds.
    elapsed_time_sec = [t / 1000 for t in data['elapsed_time']]

    # (values, line style, y-label, title) — one entry per panel.
    panels = [
        (data['lm_loss'], 'b-', 'loss', 'Loss'),
        (data['learning_rate'], 'r-', 'lr', 'Learning Rate'),
        (elapsed_time_sec, 'g-', 'time(s)', 'Iterations Time'),
        (data['throughput'], 'm-', 'throughput(TFLOP/s/GPU)', 'Throughput'),
        (data['grad_norm'], 'c-', 'grad_norm', 'Grad Norm'),
    ]
    for ax, (values, style, ylabel, title) in zip(axes, panels):
        ax.plot(data['iterations'], values, style, linewidth=1.5, alpha=0.8)
        ax.set_ylabel(ylabel, fontsize=12)
        ax.set_title(title, fontsize=14)
        ax.grid(True, linestyle='--', alpha=0.7)

    # Learning rates are tiny (~1e-6); force scientific notation on that axis.
    axes[1].ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
    # Hide the unused sixth panel instead of showing an empty axes frame.
    axes[5].axis('off')

    # Auto-adjust the layout before saving.
    fig.tight_layout()
    fig.subplots_adjust(top=0.94)

    fig.savefig(save_path, dpi=300, bbox_inches='tight')
    print(f"图像已保存至: {save_path}")

    # Release the figure so repeated calls do not accumulate memory.
    plt.close(fig)

if __name__ == "__main__":
    # 示例日志内容 - 实际使用时应替换为从文件读取
    # log_content = [
    #     '[2025-05-24 11:34:53] iteration 34/ 2000 | consumed samples: 544 | elapsed time per iteration (ms): 3081.4 | throughput per GPU (TFLOP/s/GPU): 391.9 | learning rate: 1.242045E-06 | global batch size: 16 | lm loss: 2.527292E+00 | loss scale: 1.0 | grad norm: 60.009 | number of skipped iterations: 0 | number of nan iterations: 0 |',
    #     '[2025-05-24 11:34:56] iteration 35/ 2000 | consumed samples: 560 | elapsed time per iteration (ms): 3089.4 | throughput per GPU (TFLOP/s/GPU): 390.9 | learning rate: 1.241477E-06 | global batch size: 16 | lm loss: 2.242202E+00 | loss scale: 1.0 | grad norm: 25.897 | number of skipped iterations: 0 | number of nan iterations: 0 |',
    #     '[2025-05-24 11:35:00] iteration 36/ 2000 | consumed samples: 576 | elapsed time per iteration (ms): 3269.6 | throughput per GPU (TFLOP/s/GPU): 369.3 | learning rate: 1.240909E-06 | global batch size: 16 | lm loss: 1.815998E+00 | loss scale: 1.0 | grad norm: 23.991 | number of skipped iterations: 0 | number of nan iterations: 0 |',
    #     '[2025-05-24 11:35:03] iteration 37/ 2000 | consumed samples: 592 | elapsed time per iteration (ms): 3121.0 | throughput per GPU (TFLOP/s/GPU): 386.9 | learning rate: 1.240341E-06 | global batch size: 16 | lm loss: 2.360538E+00 | loss scale: 1.0 | grad norm: 67.980 | number of skipped iterations: 0 | number of nan iterations: 0 |'
    # ]
    
    # 从日志文件读取内容
    with open('/data01/huawei-2025/zsz/mindspeed_llm/MindSpeed-LLM/tune_qwen3-0.6b_alpaca.log', 'r') as f:
        log_content = f.readlines()
    
    # 解析日志并绘制图表
    training_data = parse_training_log(log_content)
    plot_training_metrics(training_data, save_path='./log.png')