import cv2, os, math, sys, re, json
from datetime import timedelta
from config.config import DEFAULT_INTERVAL_SEC

def parse_metadata_block(block: str) -> dict:
    """
    Parse a single SRT metadata block and return a dictionary.

    Expected block layout (DJI-style SRT):
        1
        00:00:00,000 --> 00:00:00,033
        <font size="28">FrameCnt: 1, DiffTime: 33ms
        2025-01-10 17:32:22.807
        [focal_len: 40.00] [dzoom_ratio: 1.00], [latitude: 31.761855] [longitude: 117.184543] [rel_alt: 84.339 abs_alt: 109.946] [gb_yaw: -127.5 gb_pitch: -53.1 gb_roll: 0.0] </font>

    Returns a dict with keys ``original_frame`` (int or None),
    ``diff_time`` (e.g. "33ms" or None), ``timestamp`` (capture wall-clock
    string) and ``metadata`` (dict of the bracketed key/value fields),
    or an empty dict when the block cannot be parsed.
    """
    lines = block.strip().splitlines()
    if len(lines) < 4:
        return {}
    try:
        fc_line = lines[2]            # "<font ...>FrameCnt: N, DiffTime: Nms"
        timestamp = lines[3].strip()  # capture wall-clock timestamp
        # Locate the first line carrying the bracketed metadata fields.
        # lstrip() so leading whitespace does not hide the '[' marker.
        meta_line = ""
        for line in lines[4:]:
            if line.lstrip().startswith('['):
                meta_line = line
                break
        meta = {}
        for m in re.finditer(r'\[([^:\]]+):\s*([^\]]+)\]', meta_line):
            key = m.group(1).strip()
            value = m.group(2).strip()
            if key == "rel_alt":
                # "[rel_alt: X abs_alt: Y]" packs two fields into one
                # bracket; split them. Altitudes may be negative (below
                # takeoff point / sea level), so allow an optional sign —
                # an unsigned pattern would silently drop the minus.
                m2 = re.search(r'(-?[\d.]+)\s+abs_alt:\s*(-?[\d.]+)', value)
                if m2:
                    meta["rel_alt"] = m2.group(1)
                    meta["abs_alt"] = m2.group(2)
                else:
                    meta[key] = value
            elif key == "gb_yaw":
                # "[gb_yaw: X gb_pitch: Y gb_roll: Z]" packs three gimbal
                # angles into one bracket; split them.
                m2 = re.search(
                    r'(-?[\d.]+)\s+gb_pitch:\s*(-?[\d.]+)\s+gb_roll:\s*(-?[\d.]+)',
                    value)
                if m2:
                    meta["gb_yaw"] = m2.group(1)
                    meta["gb_pitch"] = m2.group(2)
                    meta["gb_roll"] = m2.group(3)
                else:
                    meta[key] = value
            else:
                meta[key] = value
        fc_match = re.search(r'FrameCnt:\s*(\d+)', fc_line)
        dt_match = re.search(r'DiffTime:\s*([\d\.]+ms)', fc_line)
        return {
            "original_frame": int(fc_match.group(1)) if fc_match else None,
            "diff_time": dt_match.group(1) if dt_match else None,
            "timestamp": timestamp,
            "metadata": meta
        }
    except Exception as e:
        print(f"[ERROR] Failed to parse metadata block: {e}")
        return {}

def parse_srt(srt_path):
    """Parse a whole SRT file into ``{str(frame_number): parsed_block}``.

    Blocks are separated by blank lines. Blocks without a parsable
    ``FrameCnt`` are skipped.
    """
    with open(srt_path, 'r', encoding='utf-8') as f:
        content = f.read()
    metadata_map = {}
    blocks = re.split(r'\n\s*\n', content)
    for block in blocks:
        parsed = parse_metadata_block(block)
        # Explicit None check (consistent with process_srt_metadata):
        # a truthiness test would wrongly drop a frame numbered 0.
        if parsed and parsed.get("original_frame") is not None:
            metadata_map[str(parsed["original_frame"])] = parsed
    return metadata_map

def get_frame_timestamp(frame_count, fps):
    """Return the video-relative timestamp for a frame index as a string."""
    elapsed_seconds = frame_count / fps
    return str(timedelta(seconds=elapsed_seconds))


def _compress_path(path):
    """Collapse duplicated / cyclic directory components in *path*.

    Guards the output directory against accidental nesting such as
    ``a/b/b/c`` (duplicate component, skipped) or ``a/b/a`` (cycle,
    the walk stops there).
    """
    parts = []
    for part in os.path.normpath(path).split(os.sep):
        if not part:
            continue
        if parts and part == parts[-1]:
            print(f"[路径压缩] 发现重复目录: {part}，已跳过")
            continue
        if len(parts) >= 2 and part == parts[-2]:
            print(f"[路径压缩] 发现循环目录: {parts[-1]}/{part}，中断循环")
            break
        parts.append(part)
    return os.sep.join(parts)


def extract_frames(video_path, output_dir, interval_sec=DEFAULT_INTERVAL_SEC):
    """Extract one frame every *interval_sec* seconds from *video_path*.

    Frames are saved as ``frame_NNNN.jpg`` under *output_dir* (in a
    sub-directory derived from the video file name; videos named
    ``<base>_T`` / ``<base>_V`` are grouped as ``<base>/<modality>``).
    A sibling SRT file, when present, is merged into the per-frame
    metadata, and a ``metadata.json`` is written next to the images.

    Returns the metadata dict, or None when the video cannot be read.
    """
    print("video_path:", video_path)
    print("output_dir:", output_dir)

    # --- Resolve the output directory from the video file name. ---
    # (A previous version also derived a "Videos"-relative directory here,
    # but that value was never used; the dead code has been removed.)
    video_name = os.path.basename(video_path)
    base_name = os.path.splitext(video_name)[0]
    parts = base_name.split('_')
    # Videos whose name ends in a modality tag (T = thermal, V = visible
    # — presumably; confirm against the data set) are grouped under the
    # common base so both modalities of one flight share a folder.
    if len(parts) >= 4 and parts[-1] in ('T', 'V'):
        video_base = '_'.join(parts[:-1])
        output_path = os.path.join(output_dir, video_base, parts[-1])
    else:
        video_base = base_name
        output_path = os.path.join(output_dir, base_name)

    original_path = output_path
    print(f"[DEBUG] 压缩前路径: {original_path}")
    output_path = _compress_path(output_path)
    if output_path != original_path:
        print(f"[路径优化] 优化后: {output_path}")
    output_path = os.path.normpath(output_path)
    os.makedirs(output_path, exist_ok=True)
    print(f"[LOG] 最终抽帧路径: {output_path}")

    # --- Decode the video and save frames at the requested interval. ---
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("Error: Unable to open video")
        return
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        # Broken containers can report 0 FPS; bail out instead of
        # dividing by zero in the timestamp computation below.
        print("Error: Invalid FPS reported by video")
        cap.release()
        return
    frame_interval = math.floor(fps * interval_sec)  # interval in frame count
    frame_count = 0
    saved_count = 0
    next_extract = 0
    frames_metadata = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if frame_count >= next_extract:
            out_path = os.path.join(output_path, f"frame_{saved_count:04d}.jpg")
            cv2.imwrite(out_path, frame)
            # SRT FrameCnt is 1-based, so store frame_count + 1 for the join.
            frames_metadata.append({
                "original_frame": frame_count + 1,
                "timestamp": get_frame_timestamp(frame_count, fps)
            })
            saved_count += 1
            next_extract += frame_interval
        frame_count += 1
    cap.release()

    # --- Locate the companion SRT: prefer an exact name match, then the
    # name without the modality suffix. ---
    video_dir = os.path.dirname(video_path)
    srt_candidate = os.path.join(video_dir, base_name + ".srt")
    if not os.path.exists(srt_candidate) and len(parts) >= 4 and parts[-1] in ('T', 'V'):
        srt_candidate = os.path.join(video_dir, '_'.join(parts[:-1]) + ".srt")
    srt_metadata = parse_srt(srt_candidate) if os.path.exists(srt_candidate) else None

    # --- Merge SRT data into each saved frame's record. ---
    enhanced_frames = []
    for frame_info in frames_metadata:
        key = str(frame_info["original_frame"])
        new_frame = {
            "original_frame": frame_info["original_frame"],
            "video_timestamp": frame_info["timestamp"]
        }
        if srt_metadata and key in srt_metadata:
            srt_info = srt_metadata[key]
            new_frame["srt_timestamp"] = srt_info.get("timestamp")
            new_frame["diff_time"] = srt_info.get("diff_time")
            new_frame["metadata"] = srt_info.get("metadata")
        enhanced_frames.append(new_frame)

    metadata = {
        "video_name": video_name,
        "fps": fps,
        "interval_sec": interval_sec,
        "frames": enhanced_frames
    }
    meta_out = os.path.join(output_path, 'metadata.json')
    with open(meta_out, 'w', encoding='utf-8') as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)
    print(f"[LOG] 元数据已保存: {meta_out}")
    # Return metadata so callers can use it without re-reading the JSON.
    return metadata

def process_srt_metadata(srt_file: str, output_json: str):
    """
    Process the SRT metadata file and generate a JSON file that maps each
    extracted frame (original frame) to its metadata.

    For example, the generated JSON will be:
    {
        "1": { 'original_frame': 1, 'diff_time': "33ms", ... },
        "2": { ... }
    }

    Returns the mapping dict, or None when the SRT file does not exist.
    """
    if not os.path.exists(srt_file):
        print(f"[ERROR] SRT file not found: {srt_file}")
        return
    with open(srt_file, 'r', encoding='utf-8') as f:
        content = f.read()
    blocks = re.split(r'\n\s*\n', content)
    mapping = {}
    for block in blocks:
        parsed = parse_metadata_block(block)
        if parsed.get("original_frame") is not None:
            mapping[str(parsed["original_frame"])] = parsed
    with open(output_json, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps non-ASCII metadata readable, consistent
        # with the metadata.json dump in extract_frames.
        json.dump(mapping, f, ensure_ascii=False, indent=4)
    print(f"[INFO] Metadata processed and saved to {output_json}")
    return mapping

# Example usage:
# process_srt_metadata("path/to/metadata.srt", "extracted_metadata.json")


