# parser.py
import re
from datetime import datetime


def parse_slow_query_log(log_lines):
    """
    Parse one MySQL slow-query-log entry into a transaction dict.

    Parameters
    ----------
    log_lines : iterable of str
        The lines of a single slow-log entry.

    Returns
    -------
    dict | None
        A dict with whichever of the keys 'duration_sec' (float),
        'rows_affected' (int) and 'sql' (str, first real statement line)
        were found, or None when no Query_time header was present.
    """
    tx = {}

    for line in log_lines:
        line = line.strip()

        # Header/metadata lines all start with '#'.
        if line.startswith("#"):
            # "# Query_time: 12.50 ..."
            if line.startswith("# Query_time:"):
                match = re.search(r"Query_time:\s*([\d.]+)", line)
                if match:
                    tx['duration_sec'] = float(match.group(1))
            # Rows_affected may appear on the same '#' line as Query_time.
            if "Rows_affected:" in line:
                match = re.search(r"Rows_affected:\s*(\d+)", line)
                if match:
                    tx['rows_affected'] = int(match.group(1))
            continue

        # Non-'#' lines are statements. Skip administrative preamble
        # ("USE db;", "SET timestamp=...;") so we capture the first *real*
        # query — previously "SET timestamp=..." was wrongly taken as the SQL.
        lowered = line.lower()
        if line and not lowered.startswith("use ") and not lowered.startswith("set timestamp"):
            tx['sql'] = line
            break  # simplification: only the first SQL line is kept

    return tx if 'duration_sec' in tx else None


# Usage example / smoke test.
# Bug fix: the call previously passed the literal string "log_lines"
# (the sample list was commented out), so the parser iterated characters
# and always returned None.
if __name__ == "__main__":
    log_lines = [
        "# Time: 2025-03-15T10:23:45.123456Z",
        "# User@Host: webapp[webapp] @ [10.1.2.3]",
        "# Query_time: 12.50  Lock_time: 0.002  Rows_sent: 1  Rows_examined: 50000  Rows_affected: 48320",
        "SET timestamp=1710498225;",
        "UPDATE orders SET status = 'shipped' WHERE created_at < '2023-01-01';",
    ]
    tx = parse_slow_query_log(log_lines)
    print(tx)
    # Sample output: {'duration_sec': 12.5, 'rows_affected': 48320, 'sql': "UPDATE orders ..."}