"""
    百万级别日志文件读取，找出最常出现的10个字段值
"""
import heapq
import time

# 一次读取10M数据
def read_in_chunks(file_object, chunk_size=1024 * 1024 * 10):
    """Yield successive chunks (default 10 MB) read from *file_object*.

    Stops as soon as ``read`` returns an empty result (end of stream).
    """
    chunk = file_object.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(chunk_size)


# Maps each field value -> number of times it has been seen.
field_counts = {}
# Min-heap of (count, field) tuples tracking the 10 most frequent fields.
top_fields = []
# Expected number of comma-separated fields per log line.
num_expected_fields = 7


# 处理行数据
def process_line(line):
    """Count every non-empty field on one log line and refresh the top-10 heap.

    Fields are comma-separated; empty fragments (e.g. from consecutive
    commas) are discarded. Errors on a single line are reported and
    swallowed so the rest of the file is still processed.
    """
    try:
        # Drop the trailing newline and surrounding whitespace.
        line = line.strip()
        # Split on commas; skip empty fragments and trim each field.
        fields = [f.strip() for f in line.split(',') if f]

        # NOTE: the old code padded short lines with '' up to
        # num_expected_fields, but padded empties were immediately skipped
        # by the non-empty check below — the padding was dead code and has
        # been removed.
        for field in fields:
            if field:  # ignore blank fields
                field_counts[field] = field_counts.get(field, 0) + 1
                update_top_fields(field, field_counts[field])
        # The per-line `print(field_counts)` debug output was removed:
        # printing the entire dict for every one of millions of lines is
        # prohibitively slow and floods stdout.
    except Exception as e:
        # Best-effort: report the bad line and keep going.
        print(f"Error processing line: {line}. Error: {e}")


# 更新最小堆
def update_top_fields(field, count):
    """Record *field*'s new running total *count* in the top-10 min-heap.

    Bug fix: counts only ever grow, so the heap may already hold a stale
    (old_count, field) entry for this field. The old code simply pushed a
    fresh entry, letting one field occupy several of the 10 slots and
    evicting genuinely distinct fields. Remove the stale entry first so
    each field appears at most once.
    """
    # Linear scan is fine: the heap never holds more than 10 entries.
    for i, (_, existing) in enumerate(top_fields):
        if existing == field:
            # Swap-remove the stale entry, then restore the heap invariant.
            top_fields[i] = top_fields[-1]
            top_fields.pop()
            heapq.heapify(top_fields)
            break
    if len(top_fields) < 10:
        heapq.heappush(top_fields, (count, field))
    elif count > top_fields[0][0]:
        # New count beats the current minimum: evict it.
        heapq.heapreplace(top_fields, (count, field))


def read_log_chunk(file_path):
    """Stream *file_path* in large chunks and feed each complete line to process_line.

    Bug fix: a chunk boundary can fall in the middle of a line; the old code
    split every chunk independently with ``splitlines()``, so a straddling
    line was processed as two partial lines and counted wrongly. The trailing
    partial line of each chunk is now carried over and prepended to the next.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        pending = ''  # partial line left over from the previous chunk
        for chunk in read_in_chunks(file):
            chunk = pending + chunk
            lines = chunk.split('\n')
            # The last piece is complete only if the chunk ended with '\n';
            # keep it back until the next chunk (or EOF) finishes it.
            pending = lines.pop()
            for line in lines:
                process_line(line)
        if pending:
            # Final line of the file had no trailing newline.
            process_line(pending)


def get_result():
    """Print the 10 most frequent field values, most frequent first.

    Ties on count are broken alphabetically by field value.
    """
    ranked = sorted(top_fields, key=lambda entry: (-entry[0], entry[1]))
    top_10 = [name for _, name in ranked]
    print("Top 10 most frequent fields:", top_10)


if __name__ == '__main__':
    # Time the whole run: ingest the log, then report the top fields.
    start_time = time.time()

    read_log_chunk('db.log')
    get_result()

    # Report wall-clock time for the full pass.
    total_time = time.time() - start_time
    print(f"Total execution time: {total_time:.2f} seconds")
