import re
import json
import csv
import glob

def parse_log_to_csv(log_content, writer):
    """Parse one log file's text and append one CSV row per video block.

    Args:
        log_content (str): Full log text containing blocks of the form
            ``【视频 i/n】`` / ``视频路径: ...`` / ``分析结果：`` with an
            embedded ```json fenced object holding a ``behaviors`` list.
        writer (csv.DictWriter): Open writer whose fieldnames include the
            three id columns plus ``{name}_occurred`` / ``{name}_confidence``
            / ``{name}_time_occurred`` for each expected behavior name.

    Returns:
        None. Rows are emitted through ``writer``; blocks whose JSON cannot
        be parsed (or whose row has unknown columns) are reported to stdout
        and skipped.
    """
    # Matches one video block: index/total, the path, and everything from
    # "分析结果：" up to (but excluding) the "总生成时间" summary line or EOF.
    video_block_pattern = re.compile(
        r'【视频 (\d+)/(\d+)】\s*'
        r'视频路径:\s*(.+?)\s*'
        r'(分析结果：[\s\S]*?)(?=\n总生成时间|\Z)',
        re.DOTALL
    )

    # Matches the first ```json fenced object inside an analysis section.
    json_pattern = re.compile(r'```json\s*({[\s\S]*?})\s*```', re.DOTALL)

    for match in video_block_pattern.finditer(log_content):
        video_number, total_videos, video_path, analysis_result = match.groups()

        json_match = json_pattern.search(analysis_result)
        if not json_match:
            continue

        try:
            # json.loads (never eval) — log content is untrusted input.
            json_data = json.loads(json_match.group(1).strip())
        except json.JSONDecodeError as e:
            print(f"解析 JSON 时出错: {e}")
            continue

        row = {
            '视频编号': video_number,
            '总视频数': total_videos,
            '视频路径': video_path
        }

        # Tolerate malformed entries: previously a single behavior missing a
        # key raised KeyError and silently discarded the whole row, valid
        # behaviors included. Now each well-formed entry is kept and missing
        # metric values default to None (written as an empty CSV cell).
        for behavior in json_data.get('behaviors', []):
            if not isinstance(behavior, dict) or 'name' not in behavior:
                continue
            name = behavior['name']
            row[f'{name}_occurred'] = behavior.get('occurred')
            row[f'{name}_confidence'] = behavior.get('confidence')
            row[f'{name}_time_occurred'] = behavior.get('time_occurred')

        try:
            writer.writerow(row)
        except ValueError as e:
            # DictWriter raises ValueError for keys absent from fieldnames
            # (e.g. an unexpected behavior name); report and keep going,
            # matching the original broad-except behavior.
            print(f"解析 JSON 时出错: {e}")


# Behavior names — each contributes one column per metric to the CSV header.
behaviors = [
    "不系安全带",
    "打电话",
    "抽烟",
    "闭眼≥2秒",
    "喝饮料",
    "吃食物",
    "玩手机",
    "双手脱离方向盘≥1秒",
    "头部左右张望≥3秒",
    "打哈欠"
]

# CSV header: three id columns, then one full column group per metric suffix
# (all "_occurred" columns, then all "_confidence", then all "_time_occurred").
fieldnames = ['视频编号', '总视频数', '视频路径']
for suffix in ('occurred', 'confidence', 'time_occurred'):
    fieldnames.extend(f'{behavior}_{suffix}' for behavior in behaviors)

# Destination CSV path.
output_file = 'videos_analysis.csv'

# Every *_fixed.txt log in the current directory is an input.
input_files = glob.glob('*_fixed.txt')

with open(output_file, mode='w', newline='', encoding='utf-8') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()

    for log_path in input_files:
        print(f"正在处理文件: {log_path}")
        with open(log_path, 'r', encoding='utf-8') as log_file:
            parse_log_to_csv(log_file.read(), writer)

print("✅ 所有 _fixed.txt 文件已解析完成，CSV 文件已生成:", output_file)

# NOTE(review): everything below is an earlier, superseded version of this
# script, kept fully commented out and never executed. Consider deleting it —
# version control already preserves the history.

# import re
# import json
# import csv
# import glob

# def parse_log_to_csv(log_content, writer):
#     """
#     将日志内容解析为CSV格式。

#     Args:
#         log_content (str): 日志内容字符串。
#         writer (csv.writer): CSV文件写入器。

#     Returns:
#         None

#     """
#     # 剔除 selectedCode 行
#     log_content = re.sub(r'#selectedCode.*?(\n|$)', '', log_content, flags=re.DOTALL)

#     # 正则表达式匹配每个视频块
#     video_block_pattern = re.compile(
#         r'【视频 (\d+)/(\d+)】\s*'
#         r'视频路径:\s*(.+?)\s*'
#         r'(分析结果：[\s\S]*?)(?=\n总生成时间|\Z)', 
#         re.DOTALL
#     )

#     # 正则表达式匹配 JSON 数据
#     json_pattern = re.compile(r'```json\s*([{\[][\s\S]*?[}\]])[\s\S]*?```', re.DOTALL)

#     # 遍历每个视频块
#     for match in video_block_pattern.finditer(log_content):
#         video_number = match.group(1)
#         total_videos = match.group(2)
#         video_path = match.group(3)
#         analysis_result = match.group(4)

#         # 如果包含“处理视频时发生错误”，跳过
#         if "处理视频时发生错误" in analysis_result:
#             print(f"跳过出错的视频: {video_path}")
#             continue

#         # 提取 JSON 数据
#         json_match = json_pattern.search(analysis_result)
#         if json_match:
#             try:
#                 json_data = json.loads(json_match.group(1).strip())
#                 behaviors_data = json_data.get('behaviors', [])
#                 if len(behaviors_data) == 0:
#                     behaviors_data = json_data.get('behavior', [])
#                     if len(behaviors_data) == 0:
#                         behaviors_data = json_data


#                 row = {
#                     '视频编号': video_number,
#                     '总视频数': total_videos,
#                     '视频路径': video_path
#                 }

#                 for behavior in behaviors_data:
#                     name = behavior['name']
#                     occurred = behavior['occurred']
#                     confidence = behavior['confidence']
#                     time_occurred = behavior['time_occurred']

#                     row[f'{name}_occurred'] = occurred
#                     row[f'{name}_confidence'] = confidence
#                     row[f'{name}_time_occurred'] = time_occurred

#                 writer.writerow(row)
#             except Exception as e:
#                 print(f"解析 JSON 时出错: {e}")

# # 视频行为名称列表（用于 CSV 列名）
# behaviors = [
#     "不系安全带",
#     "打电话",
#     "抽烟",
#     "闭眼≥2秒",
#     "喝饮料",
#     "吃食物",
#     "玩手机",
#     "双手脱离方向盘≥1秒",
#     "头部左右张望≥3秒"
# ]

# fieldnames = [
#     '视频编号',
#     '总视频数',
#     '视频路径',
#     *[f'{behavior}_occurred' for behavior in behaviors],
#     *[f'{behavior}_confidence' for behavior in behaviors],
#     *[f'{behavior}_time_occurred' for behavior in behaviors]
# ]

# output_file = 'videos_analysis.csv'

# input_files = glob.glob('*_fixed.txt')

# with open(output_file, mode='w', newline='', encoding='utf-8') as csvfile:
#     writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#     writer.writeheader()

#     for input_file in input_files:
#         print(f"正在处理文件: {input_file}")
#         with open(input_file, 'r', encoding='utf-8') as file:
#             log_content = file.read()

#         # 剔除 selectedCode 引用行
#         clean_log_content = re.sub(r'#selectedCode.*?(\n|$)', '', log_content, flags=re.DOTALL)

#         parse_log_to_csv(clean_log_content, writer)

# print("✅ 所有 _fixed.txt 文件已解析完成，CSV 文件已生成:", output_file)