"""
    百万级别日志文件读取，找出最常出现的10个字段值
    数据分区+多线程
"""
import heapq
import threading
from concurrent.futures import ThreadPoolExecutor
import timeit

# Configuration constants.
num_expected_fields = 7  # expected number of comma-separated fields per line
file_path = 'db.log'
num_partitions = 4  # number of partitions (and worker threads)

# One counter dict per partition.
# NOTE(review): these counters are never written — process_line tallies into
# a thread-local dict instead; confirm which was intended.
partition_field_counts = [{} for _ in range(num_partitions)]
# Min-heap of (count, field) pairs tracking the most frequent fields.
# NOTE(review): this name is rebound to a fresh list further down the file,
# so at runtime there is a single shared heap, not one per partition.
top_fields = []

# Lock serializing heap updates across worker threads.
lock = threading.Lock()


# Alternative reader (unused, kept for reference): yields raw 10 MB chunks.
# def read_in_chunks(file_object, chunk_size=1024 * 1024 * 10):
#     while True:
#         data = file_object.read(chunk_size)
#         if not data:
#             break
#         yield data
def read_in_chunks(file, n):
    """Yield successive batches of up to n stripped lines from file.

    Fixes the original generator, which never reset `lines` after yielding:
    once the list reached n entries it re-yielded the same ever-growing list
    on every subsequent line, and a trailing partial batch was lost.
    """
    lines = []
    for line in file:
        lines.append(line.strip())
        if len(lines) >= n:
            yield lines
            lines = []  # start a fresh batch instead of re-yielding this one
    if lines:
        # Emit the final partial batch.
        yield lines


# Thread-local storage: each worker thread lazily creates its own
# field-count dict on first use (see process_line).
atomic_field_counts = threading.local()


def update_partition_top_fields(partition_index, field, count):
    """Record the latest count for field in the shared top-10 min-heap.

    partition_index is kept for interface compatibility; all partitions
    currently share the single global top_fields heap, guarded by lock.
    """
    with lock:
        heap = top_fields
        # Remove any stale entry for this field first; the original pushed a
        # new tuple on every increment, so one hot field could occupy all 10
        # slots with duplicates and evict genuinely distinct fields.
        for i, (_, existing) in enumerate(heap):
            if existing == field:
                heap[i] = heap[-1]
                heap.pop()
                heapq.heapify(heap)
                break
        if len(heap) < 10:
            heapq.heappush(heap, (count, field))
        elif count > heap[0][0]:
            # Heap is full: only displace the current minimum if we beat it.
            heapq.heapreplace(heap, (count, field))


def process_line(partition_index, line):
    """Count every non-empty comma-separated field of one log line.

    Counts accumulate in this partition's dict from partition_field_counts
    (which the module sets up for exactly this purpose but the original
    thread-local version never touched), and each updated count is offered
    to the shared top-10 heap.
    """
    try:
        # Drop the trailing newline / surrounding whitespace.
        line = line.strip()
        # Split on commas, skipping empty segments produced by ',,'.
        fields = [f.strip() for f in line.split(',') if f]

        # Short rows are padded with '' placeholders; the non-empty check
        # below skips them, so short rows are effectively counted as-is.
        if len(fields) < num_expected_fields:
            fields.extend([''] * (num_expected_fields - len(fields)))

        # Per-partition counter: each partition is processed by exactly one
        # task, so this dict has a single writer thread.
        counts = partition_field_counts[partition_index]
        for field in fields:
            if field:  # skip empty / padding fields
                counts[field] = counts.get(field, 0) + 1
                update_partition_top_fields(partition_index, field, counts[field])
    except Exception as e:
        # Best-effort: one malformed line must not kill the worker thread.
        print(f"Error processing line: {line}. Error: {e}")


# Aggregate count dict (the merge loop that would populate it is currently
# commented out in main).
field_counts = {}
# Global top-10 min-heap.
# NOTE(review): this rebinds the `top_fields` already defined near the top
# of the file — at runtime every reference resolves to this single shared
# list, so there are no true per-partition heaps.
top_fields = []


# Worker body: each partition task handles one contiguous index range.
def process_partition(partition_index, start, end):
    """Run process_line over chunks[start:end] on behalf of one partition."""
    for entry in chunks[start:end]:
        process_line(partition_index, entry)


# Maintain the global top-10 min-heap.
def update_top_fields(field, count):
    """Insert (count, field) into top_fields, keeping only the 10 largest."""
    candidate = (count, field)
    if len(top_fields) >= 10:
        # Heap full: displace the current minimum only when we beat it.
        if candidate[0] > top_fields[0][0]:
            heapq.heapreplace(top_fields, candidate)
    else:
        heapq.heappush(top_fields, candidate)


def main():
    """Read the log file, fan lines out across worker threads, and print
    the 10 most frequent field values."""
    global chunks

    # Load the whole file as a flat list of stripped lines. Text files
    # cannot be seeked by line number, so each worker is handed an index
    # range into this shared list instead of re-reading the file.
    # (The original passed whole batches from read_in_chunks to
    # process_line, which expects a single string per call.)
    with open(file_path, 'r', encoding='utf-8') as file:
        chunks = [raw_line.strip() for raw_line in file]

    total_chunks = len(chunks)
    # Lines per partition; the final partition absorbs the remainder.
    chunk_per_thread = total_chunks // num_partitions

    # Fan out: one task per partition, each over a disjoint index range.
    with ThreadPoolExecutor(max_workers=num_partitions) as executor:
        futures = [
            executor.submit(
                process_partition,
                partition_index,                          # partition id
                partition_index * chunk_per_thread,       # start index
                (partition_index + 1) * chunk_per_thread  # end index
                if partition_index < num_partitions - 1
                else total_chunks,
            )
            for partition_index in range(num_partitions)
        ]

    # The executor context exit already joined the workers; result()
    # re-raises any exception a worker left behind.
    for future in futures:
        future.result()

    # Workers push straight into the single shared top_fields heap, so no
    # per-partition merge is needed. (The original looped update_top_fields
    # over top_fields itself — mutating the list while iterating it.) The
    # heap may still hold stale duplicate entries for the same field; keep
    # only the highest count seen per field.
    best_counts = {}
    for count, field in top_fields:
        if count > best_counts.get(field, 0):
            best_counts[field] = count

    # Sort by descending count, then field name, and report the winners.
    top_10 = [field for field, count in
              sorted(best_counts.items(), key=lambda kv: (-kv[1], kv[0]))]
    print("Top 10 most frequent fields:", top_10)


if __name__ == "__main__":
    start_time = timeit.default_timer()
    main()
    end_time = timeit.default_timer()
    print(f"Total execution time: {end_time - start_time:.2f} seconds")
