def process_large_file_in_chunks(filename, chunk_size=1024):
    """Process a text file in batches of roughly chunk_size bytes.

    Relies on the size-hint argument of ``file.readlines(hint)`` to bound
    how much data is held in memory per iteration.

    Args:
        filename: path of the UTF-8 text file to read.
        chunk_size: approximate number of bytes to pull per batch.
    """
    total_lines = 0
    chunk_count = 0

    with open(filename, 'r', encoding='utf-8') as file:
        # readlines(hint) returns [] at EOF, which equals the sentinel
        # and terminates the iterator.
        for lines in iter(lambda: file.readlines(chunk_size), []):
            chunk_count += 1
            total_lines += len(lines)

            # Hand the current batch to the example handler.
            process_chunk(lines, chunk_count)

            print(f"块 {chunk_count}: 处理了 {len(lines)} 行")

    print(f"总共处理了 {chunk_count} 个块，{total_lines} 行")


def process_chunk(lines, chunk_num):
    """Example chunk handler: echo every line of the batch to stdout."""
    # Placeholder — replace with real processing logic.
    for entry in lines:
        print(entry)


def memory_efficient_file_processing(input_file, output_file, max_memory_mb=50):
    """Transform input_file into output_file one bounded batch at a time.

    Every ``readlines`` call is capped at roughly ``max_memory_mb``
    megabytes, so the whole file is never resident in memory at once.

    Args:
        input_file: UTF-8 text file to read.
        output_file: UTF-8 text file to write transformed lines to.
        max_memory_mb: approximate per-batch read budget in megabytes.

    NOTE(review): ``sys.getsizeof`` on a list is shallow — it measures the
    list object only, not the strings it references, so the printed figure
    understates real memory use.
    """
    import sys

    # Byte budget handed to readlines() as its size hint.
    max_bytes = max_memory_mb * 1024 * 1024

    processed_count = 0

    with open(input_file, 'r', encoding='utf-8') as infile, \
            open(output_file, 'w', encoding='utf-8') as outfile:

        # Prime the first batch, then loop while batches are non-empty.
        batch = infile.readlines(max_bytes)
        while batch:
            # Build the transformed batch (append-by-append, mirroring
            # the measured list growth reported below).
            processed_lines = []
            for raw in batch:
                processed_lines.append(process_line(raw.strip()) + '\n')

            # Flush the whole batch in one call.
            outfile.writelines(processed_lines)
            processed_count += len(batch)

            # Report (shallow) memory footprint of the two batch lists.
            memory_usage = sys.getsizeof(batch) + sys.getsizeof(processed_lines)
            print(f"已处理: {processed_count} 行, 当前内存使用: {memory_usage / 1024 / 1024:.2f} MB")

            batch = infile.readlines(max_bytes)


def process_line(line):
    """Example per-line transform: return the line uppercased."""
    transformed = line.upper()
    return transformed


def monitor_log_tail(log_file, check_interval=1):
    """Poll a log file and print lines appended after monitoring starts.

    Args:
        log_file: path of the UTF-8 log file to watch.
        check_interval: seconds to sleep between polls.

    Runs until interrupted with Ctrl-C or until the file disappears.
    Handles log truncation/rotation: if the file shrinks below the saved
    offset, reading restarts from the beginning instead of seeking past
    EOF and silently reading nothing forever (the bug in the original).
    """
    import time

    # Record the current end of the file so only new lines are reported.
    # Handle a missing file here the same way the polling loop does,
    # instead of letting FileNotFoundError propagate.
    try:
        with open(log_file, 'r', encoding='utf-8') as f:
            f.seek(0, 2)  # move to end of file
            last_position = f.tell()
    except FileNotFoundError:
        print(f"日志文件 {log_file} 不存在")
        return

    print(f"开始监控日志文件: {log_file}")
    print(f"初始位置: {last_position}")

    while True:
        try:
            with open(log_file, 'r', encoding='utf-8') as f:
                # Detect truncation/rotation: if the file is now shorter
                # than our saved offset, restart from the beginning.
                f.seek(0, 2)
                if f.tell() < last_position:
                    last_position = 0

                f.seek(last_position)  # resume from last read position

                # Read the newly appended content (at most ~8KB per poll).
                new_lines = f.readlines(8192)

                if new_lines:
                    print(f"\n发现 {len(new_lines)} 条新日志:")
                    for line in new_lines:
                        print(f"  {line.strip()}")

                    # Remember where this read ended.
                    last_position = f.tell()

            time.sleep(check_interval)

        except KeyboardInterrupt:
            print("\n停止监控")
            break
        except FileNotFoundError:
            print(f"日志文件 {log_file} 不存在")
            break


if __name__ == '__main__':
    # Read in chunks of roughly 8KB each.
    process_large_file_in_chunks('codekg.log', 8192)
    # memory_efficient_file_processing('codekg.log', 'codekg_processed.log')
    # monitor_log_tail('codekg.log')
