import re


def deduplicate_by_user_id(input_file, output_file):
    """Deduplicate ERROR log lines by user_id (full-width-colon format).

    Reads ``input_file`` line by line, keeps only the first line seen for
    each user_id, and writes the surviving lines to ``output_file``.
    Lines that contain no user_id are always kept.  Summary statistics
    are printed on success; I/O or decoding errors are caught and printed.

    Args:
        input_file: Path to the source log file (UTF-8 text).
        output_file: Path for the deduplicated output (UTF-8, overwritten).
    """
    seen_user_ids = set()
    unique_errors = []
    duplicate_count = 0
    total_lines = 0

    # Compiled once and hoisted out of the loop.  The log format uses a
    # full-width colon (：) between "user_id" and the numeric id.
    user_id_pattern = re.compile(r'user_id：(\d+)')

    try:
        # Stream the file instead of readlines(): large logs no longer
        # require the whole input in memory at once.
        with open(input_file, 'r', encoding='utf-8') as infile:
            for line in infile:
                total_lines += 1
                match = user_id_pattern.search(line)

                if match is None:
                    # No user_id on this line: keep it unconditionally.
                    unique_errors.append(line)
                elif match.group(1) in seen_user_ids:
                    # Duplicate user_id: drop the line, count it.
                    duplicate_count += 1
                else:
                    # First occurrence of this user_id: keep the line.
                    seen_user_ids.add(match.group(1))
                    unique_errors.append(line)

        # Write the deduplicated lines in one buffered call.
        with open(output_file, 'w', encoding='utf-8') as outfile:
            outfile.writelines(unique_errors)

        # Summary statistics (runtime strings intentionally unchanged).
        print("去重完成！")
        print(f"总行数: {total_lines}")
        print(f"去重后行数: {len(unique_errors)}")
        print(f"去除重复: {duplicate_count} 行")
        print(f"唯一user_id数量: {len(seen_user_ids)}")

    except (OSError, UnicodeDecodeError) as e:
        # Narrowed from `except Exception`: only the expected failure modes
        # of file handling (missing file, permissions, bad encoding) are
        # reported; programming errors now surface instead of being hidden.
        print(f"处理文件时出错: {e}")




if __name__ == '__main__':
    # Example invocation: deduplicate error.log into unique_errors.log.
    deduplicate_by_user_id('error.log', 'unique_errors.log')