import re
from typing import Dict, Optional

class SparkSQLTotalCountParser:
    """Parse Spark SQL console output and pull out the ``total_count`` value.

    Spark interleaves framework log lines with the actual query result.
    This parser drops the log noise, then looks for a ``total_count``
    column-header line followed immediately by a purely numeric line.
    """

    def __init__(self):
        # Spark/Hive emit two different timestamp styles; a line starting
        # with either one (plus a log level) is considered log noise.
        self.log_patterns = [
            # e.g. "25/08/21 16:05:57 WARN Utils: ..."
            re.compile(r'^\d{2}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} (INFO|WARN|ERROR|DEBUG|TRACE)'),
            # e.g. "2025-08-21T16:06:28,519 INFO [main] ..."
            re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3} (INFO|WARN|ERROR|DEBUG|TRACE)')
        ]

        # Column header line of the query result.
        self.total_count_pattern = re.compile(r'^\s*total_count\s*$')
        # The numeric result line expected right after the header.
        self.value_pattern = re.compile(r'^\s*(\d+)\s*$')

    def is_log_line(self, line: str) -> bool:
        """Return True if *line* is a Spark/Hive framework log line."""
        stripped = line.strip()
        return any(pattern.match(stripped) for pattern in self.log_patterns)

    def parse_total_count(self, output: str) -> Optional[Dict[str, int]]:
        """Extract ``total_count`` and its value from Spark SQL output.

        Args:
            output: Raw Spark SQL console output (logs + query result).

        Returns:
            ``{'total_count': <int>}`` when a header/value pair is found,
            otherwise ``None``.
        """
        # Keep only non-empty, non-log lines (stripped of whitespace).
        candidates = [
            line.strip()
            for line in output.split('\n')
            if line.strip() and not self.is_log_line(line)
        ]

        # Scan adjacent pairs: a header line followed by a numeric line.
        for header, follower in zip(candidates, candidates[1:]):
            if self.total_count_pattern.match(header):
                hit = self.value_pattern.match(follower)
                if hit:
                    return {'total_count': int(hit.group(1))}

        return None

# Example usage: run the parser over a captured Spark SQL log sample.
if __name__ == "__main__":
    # Captured Spark SQL output: framework log lines followed by the
    # query result (a "total_count" header and its numeric value).
    sample_output = """
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4041. Attempting port 4042.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4042. Attempting port 4043.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4043. Attempting port 4044.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4044. Attempting port 4045.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4045. Attempting port 4046.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4046. Attempting port 4047.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4047. Attempting port 4048.
25/08/21 16:05:57 WARN Utils: Service 'SparkUI' could not bind on port 4048. Attempting port 4049.
25/08/21 16:05:58 WARN Client: Neither spark.yarn.jars nor spark.yarn.archive is set, falling back to uploading libraries under SPARK_HOME.
2025-08-21T16:06:28,519 INFO [main] org.apache.hadoop.hive.conf.HiveConf - Found configuration file file:/usr/hdp/3.1.5.0-152/spark3/conf/hive-site.xml
2025-08-21T16:06:28,629 INFO [main] SessionState - Hive Session ID = 319ad322-e46d-41bd-b7c5-1125f9d85de1
2025-08-21T16:06:29,157 INFO [main] org.apache.hadoop.hive.metastore.HiveMetaStoreClient - Trying to connect to metastore with URI thrift://hdp144003.bigdata.com:9083
2025-08-21T16:06:29,182 INFO [main] org.apache.hadoop.hive.metastore.HiveMetaStoreClient - Opened a connection to metastore, current connections: 1
2025-08-21T16:06:29,201 INFO [main] org.apache.hadoop.hive.metastore.HiveMetaStoreClient - Connected to metastore.
2025-08-21T16:06:29,201 INFO [main] org.apache.hadoop.hive.metastore.RetryingMetaStoreClient - RetryingMetaStoreClient proxy=class org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient ugi=hdfs (auth:SIMPLE) retries=1 delay=5 lifetime=0
2025-08-21T16:06:29,431 INFO [main] hive.ql.metadata.Hive - Registering function aes_encrypt org.apache.hadoop.hive.ql.udf.generic.GenericUDFAesEncrypt
total_count
268"""

    parser = SparkSQLTotalCountParser()
    parsed = parser.parse_total_count(sample_output)

    if parsed is None:
        print("未找到total_count及其值")
    else:
        print(f"解析结果: {parsed}")
        print(f"total_count的值为: {parsed['total_count']}")