import os
import json
import re
import logging
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
from src.config_reader import ConfigReader

from .image_llm import get_image_response
from .chat_llm import get_response

# --- Setup basic logging ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class VideoAnalysis:
    """
    Process video frames with an image LLM, extract structured information,
    generate a final summary, and answer user questions using the accumulated
    per-frame history.

    Input (frame images) and output (JSON results) directories are derived
    from the project config plus the video name; see ``__init__``.
    """
    # --- Constants ---
    # File names of the JSON caches written to / read from self.output_dir.
    FRAME_RESPONSES_FILE = "frame_responses.json"
    SUMMARY_MEMORY_FILE = "summary_memory.json"
    FINAL_SUMMARY_FILE = "final_summary.json"
    FRAME_FILENAME_PATTERN = r"keyframe_(\d{2}:\d{2}\.\d+)\.jpg" # extracts the "MM:SS.fff" timestamp
    DEFAULT_INITIAL_THEME = "幼儿英语教学"

    
    def __init__(self, video_name: str, initial_theme: str = DEFAULT_INITIAL_THEME):
        """
        Initialize VideoAnalysis.

        Args:
            video_name (str): Name of the video file; a '.mp4' suffix is
                stripped. Used to derive the image input directory and the
                analysis output directory from the project config.
            initial_theme (str): Initial theme used when extraction starts.

        Raises:
            ValueError: If the derived image directory does not exist.
        """
        
        # Read the directory layout from the project config.
        config = ConfigReader()
        BASE_DIR = config.get('output.base_dir_path') # project root directory
        IMAGE_OUTPUT_BASE_PATH = config.get('output.image_output_base_path') # where frame images are read from
        ANALYSIS_OUTPUT_BASE_PATH = config.get('output.analysis_output_base_path') # where analysis results are written

        # Strip a '.mp4' suffix from the video name, if present.
        video_name = video_name.replace('.mp4', '')
        image_dir = os.path.join(BASE_DIR, IMAGE_OUTPUT_BASE_PATH, video_name)
        output_dir = os.path.join(BASE_DIR, ANALYSIS_OUTPUT_BASE_PATH, video_name)
        
        self.image_dir = Path(image_dir).resolve()
        self.output_dir = Path(output_dir).resolve()
        self.initial_theme = initial_theme

        # Internal caches, lazily filled or loaded from the JSON cache files.
        self._frame_responses: Optional[List[Dict]] = None
        self._summary_memory: Optional[Dict] = None
        self._final_summary: Optional[Dict] = None
        self._sorted_frame_paths: Optional[List[Tuple[Path, float]]] = None

        # Create the output directory.
        self.output_dir.mkdir(parents=True, exist_ok=True)

        if not self.image_dir.is_dir():
            raise ValueError(f"Image directory not found or is not a directory: {self.image_dir}")

    @staticmethod
    def get_frame_time_from_path(frame_path: Path) -> float:
        """Extract the timestamp (in seconds) from a frame file name.

        Assumes the name matches FRAME_FILENAME_PATTERN ("MM:SS.fff");
        returns 0.0 (after logging) when the name or time string is invalid.
        """
        match = re.search(VideoAnalysis.FRAME_FILENAME_PATTERN, frame_path.name)
        if not match:
            logging.warning(f"Could not parse time from filename: {frame_path.name}. Returning 0.0.")
            return 0.0
        time_str = match.group(1)
        try:
            # "MM:SS.fff" -> total seconds.
            minutes, seconds = time_str.split(':')
            total_seconds = float(minutes) * 60 + float(seconds)
            return total_seconds
        except ValueError as e:
            logging.error(f"Error parsing time string '{time_str}' from {frame_path.name}: {e}")
            return 0.0

    def _get_sorted_frame_paths(self) -> List[Tuple[Path, float]]:
        """Collect (path, timestamp) pairs for all frame images, sorted by time.

        The result is cached on the instance (except on directory-read error,
        where an uncached empty list is returned).
        """
        if self._sorted_frame_paths is None:
            frame_paths = []
            try:
                for f in self.image_dir.glob('*.jpg'): # glob keeps us to .jpg files only
                    if re.search(self.FRAME_FILENAME_PATTERN, f.name):
                        timestamp = self.get_frame_time_from_path(f)
                        frame_paths.append((f, timestamp))
                    else:
                         logging.warning(f"Skipping file with unexpected name format: {f.name}")
            except Exception as e:
                logging.error(f"Error reading image directory {self.image_dir}: {e}")
                return [] # Return empty list on error

            # Sort by timestamp.
            frame_paths.sort(key=lambda item: item[1])
            self._sorted_frame_paths = frame_paths
            logging.info(f"Found and sorted {len(self._sorted_frame_paths)} frame images.")
        return self._sorted_frame_paths

    def _load_json(self, filename: str) -> Optional[Any]:
        """Safely load a JSON file from the output directory.

        Returns None when the file is missing or cannot be parsed.
        """
        file_path = self.output_dir / filename
        if file_path.exists():
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
                    logging.info(f"Successfully loaded data from {file_path}")
                    return data
            except json.JSONDecodeError as e:
                logging.error(f"Error decoding JSON from {file_path}: {e}")
            except Exception as e:
                logging.error(f"Error loading file {file_path}: {e}")
        return None

    def _save_json(self, data: Any, filename: str) -> None:
        """Safely save data as JSON into the output directory (errors are logged, not raised)."""
        file_path = self.output_dir / filename
        try:
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
            logging.info(f"Successfully saved data to {file_path}")
        except Exception as e:
            logging.error(f"Error saving data to {file_path}: {e}")

    def _parse_llm_json_output(self, output_str: str) -> Optional[Dict]:
        """Clean and parse a JSON string the LLM may return (including code-fence markers).

        Returns None on non-string input, empty content, or parse failure.
        """
        if not isinstance(output_str, str):
             logging.error(f"Expected string output from LLM, but got {type(output_str)}")
             return None
        # Strip Markdown code-fence markers (```json ... ```) and whitespace.
        cleaned_str = re.sub(r'^```json\s*|\s*```$', '', output_str, flags=re.MULTILINE).strip()
        if not cleaned_str:
            logging.warning("Received empty string after cleaning LLM output.")
            return None
        try:
            return json.loads(cleaned_str)
        except json.JSONDecodeError as e:
            logging.error(f"Failed to parse LLM JSON output: {e}\nOriginal string: '{output_str[:200]}...'")
            return None
        except Exception as e:
            logging.error(f"An unexpected error occurred during JSON parsing: {e}")
            return None

    def process_frames(self, force_rerun: bool = False) -> Optional[List[Dict]]:
        """
        Process the video frames, calling the image LLM to describe each one.
        Results are cached in memory and on disk unless force_rerun is True.

        Args:
            force_rerun (bool): If True, re-process even if a cache file exists.

        Returns:
            Optional[List[Dict]]: One response dict per frame (error entries
            marked with an "error" key), or None when no frames are found.
        """
        if not force_rerun and self._frame_responses is None:
            self._frame_responses = self._load_json(self.FRAME_RESPONSES_FILE)

        if force_rerun or self._frame_responses is None:
            logging.info("Processing video frames...")
            sorted_frames = self._get_sorted_frame_paths()
            if not sorted_frames:
                logging.error("No valid frame images found to process.")
                return None

            processed_responses = []
            history = None # Image LLM history, threaded through successive calls
            for frame_path, _ in sorted_frames:
                try:
                    # NOTE(review): assumes get_image_response returns (response, history) — confirm.
                    res, history = get_image_response(str(frame_path), history) # Pass history
                    if isinstance(res, dict):
                         processed_responses.append(res)
                    else:
                         logging.warning(f"Received non-dict response for {frame_path.name}, skipping.")
                except Exception as e:
                    logging.error(f"Error processing frame {frame_path.name} with image LLM: {e}")
                    # Decide whether to stop or continue; here we continue
                    processed_responses.append({"error": f"Failed to process frame: {e}"}) # Add error marker

            self._frame_responses = processed_responses
            self._save_json(self._frame_responses, self.FRAME_RESPONSES_FILE)
        return self._frame_responses

    def extract_information(self, force_rerun: bool = False) -> Optional[Dict]:
        """
        Extract structured information (theme, subjects, story development)
        from the per-frame responses. Depends on process_frames(); results
        are cached.

        Args:
            force_rerun (bool): If True, force re-extraction.

        Returns:
            Optional[Dict]: Per-frame extraction keyed by the frame index (as
            a string), plus a "history" key holding the accumulated subjects;
            None on failure.
        """
        # NOTE(review): with force_rerun=True and no frame responses loaded,
        # process_frames() is never invoked here and the method bails out
        # below — confirm whether that flow is intended.
        if self._frame_responses is None and not force_rerun:
            self.process_frames() # Try to load or generate frame responses first

        if self._frame_responses is None:
             logging.error("Cannot extract information without frame responses.")
             return None

        if not force_rerun and self._summary_memory is None:
            self._summary_memory = self._load_json(self.SUMMARY_MEMORY_FILE)

        if force_rerun or self._summary_memory is None:
            logging.info("Extracting information from frame responses...")
            current_theme = self.initial_theme
            current_history = [] # Stores identified subjects/characters
            current_summary = "" # Rolling summary
            summary_memory = {}

            template = """
            当前主题:{theme}
            历史人物信息:{history}
            历史画面信息总结:{history_summary}
            当前画面内容:{current_frame_content}
            ---
            请根据以上信息，分析当前画面内容，并以JSON格式返回以下信息：
            1. "theme_refinement": 对当前主题的细化或确认 (字符串)
            2. "new_subjects": 从当前画面新识别出的重要主体/人物列表，每个包含 "name" 和 "description" (列表，可为空)
            3. "story_development": 对当前画面如何推进故事或场景的简要描述 (字符串)
            """

            for i, frame_res in enumerate(self._frame_responses):
                # Handle potential errors from the previous step.
                if isinstance(frame_res, dict) and "error" in frame_res:
                    logging.warning(f"Skipping frame {i} due to previous processing error: {frame_res['error']}")
                    summary_memory[str(i)] = {"error": f"Skipped due to frame processing error."}
                    continue
                if not isinstance(frame_res, dict):
                     logging.warning(f"Skipping frame {i} due to invalid format: {type(frame_res)}")
                     summary_memory[str(i)] = {"error": f"Invalid frame response format."}
                     continue


                user_input = template.format(
                    theme=current_theme,
                    history=json.dumps(current_history, ensure_ascii=False), # Pass history as JSON string
                    history_summary=current_summary,
                    current_frame_content=json.dumps(frame_res, ensure_ascii=False) # Pass frame content as JSON string
                )

                try:
                    raw_output = get_response(user_input)
                    output_data = self._parse_llm_json_output(raw_output)

                    if output_data and isinstance(output_data, dict):
                        summary_memory[str(i)] = output_data
                        # Update the rolling state for the next iteration.
                        current_theme = output_data.get("theme_refinement", current_theme)
                        new_subjects = output_data.get("new_subjects", [])
                        if isinstance(new_subjects, list) and len(new_subjects) > 0:
                            # Basic check to avoid adding duplicates - can be improved
                            existing_names = {subj.get("name") for subj in current_history if isinstance(subj, dict)}
                            for subj in new_subjects:
                                if isinstance(subj, dict) and subj.get("name") not in existing_names:
                                     current_history.append(subj)
                        current_summary = output_data.get("story_development", current_summary)
                    else:
                        logging.error(f"Failed to get valid structured data for frame {i}. Raw output: {raw_output[:200]}...")
                        summary_memory[str(i)] = {"error": "Failed to parse LLM response"}

                except Exception as e:
                    logging.error(f"Error processing frame {i} during information extraction: {e}")
                    summary_memory[str(i)] = {"error": f"Exception during processing: {e}"}

            summary_memory["history"] = current_history # Store the final accumulated history
            self._summary_memory = summary_memory
            self._save_json(self._summary_memory, self.SUMMARY_MEMORY_FILE)

        return self._summary_memory

    def generate_final_summary(self, force_rerun: bool = False) -> Optional[Dict]:
        """
        Generate the final teaching-content and story summary from the
        extracted information. Depends on extract_information(); results are
        cached.

        Args:
            force_rerun (bool): If True, force regeneration.

        Returns:
            Optional[Dict]: Dict with 'teaching_content' and
            'story_development' (or an 'error' key on failure); None when the
            prerequisite data is unavailable.
        """
        if self._summary_memory is None and not force_rerun:
            self.extract_information() # Try to load or generate summary memory first

        if self._summary_memory is None:
             logging.error("Cannot generate final summary without summary memory.")
             return None

        if not force_rerun and self._final_summary is None:
            self._final_summary = self._load_json(self.FINAL_SUMMARY_FILE)

        if force_rerun or self._final_summary is None:
            logging.info("Generating final summary...")
            all_teaching_content = set()
            all_story_development = []

            # Iterate through frame summaries, skipping the 'history' key and error entries
            for key, value in self._summary_memory.items():
                if key != "history" and isinstance(value, dict) and "error" not in value:
                    if "theme_refinement" in value:
                        all_teaching_content.add(str(value["theme_refinement"]))
                    if "story_development" in value:
                         all_story_development.append(str(value["story_development"]))

            if not all_teaching_content and not all_story_development:
                 logging.warning("No valid content found in summary memory to generate final summary.")
                 # Optionally save an empty summary or handle as error
                 self._final_summary = {"teaching_content": "N/A", "story_development": "N/A", "error": "No content"}
                 self._save_json(self._final_summary, self.FINAL_SUMMARY_FILE)
                 return self._final_summary


            # NOTE(review): all_teaching_content is a set, so the join order
            # below is not deterministic across runs.
            teaching_content_str = "\n".join(filter(None, all_teaching_content))
            story_development_str = "\n".join(filter(None, all_story_development))

            user_input = f"视频教学内容要点:\n{teaching_content_str}\n\n故事发展线索:\n{story_development_str}"
            system_prompt = (
                "请对提供的视频教学内容要点和故事发展线索进行总结。"
                "对于教学内容，请提炼核心教学目标与重点。"
                "对于故事发展，请组织成一个逻辑连贯的简短故事梗概。"
                "请以JSON格式返回结果，包含两个键：'teaching_content' (总结后的教学内容) 和 'story_development' (总结后的故事梗概)。"
            )

            try:
                raw_output = get_response(user_input=user_input, system_prompt=system_prompt)
                final_summary_data = self._parse_llm_json_output(raw_output)

                if final_summary_data and isinstance(final_summary_data, dict):
                    self._final_summary = final_summary_data
                    self._save_json(self._final_summary, self.FINAL_SUMMARY_FILE)
                else:
                    logging.error(f"Failed to get valid final summary. Raw output: {raw_output[:200]}...")
                    self._final_summary = {"error": "Failed to parse final summary response"}
                    # Optionally save the error state
                    self._save_json(self._final_summary, self.FINAL_SUMMARY_FILE)

            except Exception as e:
                logging.error(f"Error during final summary generation: {e}")
                self._final_summary = {"error": f"Exception during final summary generation: {e}"}
                # Optionally save the error state
                self._save_json(self._final_summary, self.FINAL_SUMMARY_FILE)

        return self._final_summary

    def run_full_analysis(self, force_rerun_all: bool = False) -> bool:
        """
        Run the full analysis pipeline in order: process frames, extract
        information, generate the final summary.

        Args:
            force_rerun_all (bool): Whether to force re-running every step.

        Returns:
            bool: True if every step succeeded (or loaded from cache),
            False otherwise.
        """
        logging.info("Starting full video analysis...")
        if not self.process_frames(force_rerun=force_rerun_all):
            logging.error("Full analysis failed at: process_frames")
            return False
        if not self.extract_information(force_rerun=force_rerun_all):
            logging.error("Full analysis failed at: extract_information")
            return False
        if not self.generate_final_summary(force_rerun=force_rerun_all):
            logging.error("Full analysis failed at: generate_final_summary")
            return False

        logging.info("Full video analysis completed successfully.")
        return True

    # --- Dialogue Methods ---

    def get_frame_index_by_time(self, target_time: float) -> int:
        """
        Find the index of the latest frame whose timestamp is <= target_time.

        Args:
            target_time (float): Target time in seconds.

        Returns:
            int: Index of the closest frame (in the time-sorted list).
                 Returns 0 when target_time precedes the first frame;
                 returns -1 when the frame list cannot be obtained.
        """
        sorted_frames = self._get_sorted_frame_paths()
        if not sorted_frames:
            logging.error("Cannot get frame index: Frame paths not available.")
            return -1

        closest_index = 0 # Default to first frame if target_time is very small
        for i, (_, frame_time) in enumerate(sorted_frames):
            if frame_time <= target_time:
                closest_index = i
            else:
                # Since frames are sorted, we can stop early
                break

        logging.debug(f"Target time {target_time}s maps to frame index {closest_index} (time: {sorted_frames[closest_index][1]:.3f}s)")
        return closest_index

    def _load_history_before_index(self, target_index: int) -> Optional[Dict[str, Any]]:
        """
        Load all historical information up to and including the given frame
        index. Internal helper; relies on cached or freshly generated data.

        Args:
            target_index (int): Index of the target frame.

        Returns:
            Optional[Dict[str, Any]]: Dict with 'frames', 'history' and
            'final_summary' keys, or None when prerequisite data is missing.
        """
        # Ensure required data is loaded/generated.
        if self._summary_memory is None:
            self.extract_information()
            if self._summary_memory is None: return None

        if self._final_summary is None:
            self.generate_final_summary()
            if self._final_summary is None: return None

        history_info = {
            "frames": [],
            "history": self._summary_memory.get("history", []), # Accumulated subjects/characters
            "final_summary": self._final_summary
        }

        # Collect frame summaries up to the target index.
        for i in range(target_index + 1):
            frame_data = self._summary_memory.get(str(i))
            if frame_data and isinstance(frame_data, dict) and "error" not in frame_data:
                history_info["frames"].append(frame_data)
            elif frame_data and isinstance(frame_data, dict) and "error" in frame_data:
                 history_info["frames"].append({"warning": f"Frame {i} had processing errors."}) # Include marker
            else:
                # This case might indicate incomplete summary_memory.json
                logging.warning(f"Missing or invalid data for frame index {i} in summary memory.")
                history_info["frames"].append({"warning": f"Missing data for frame {i}."})


        return history_info

    def _generate_dialogue_prompt(self, history_info: Dict[str, Any], user_question: str, current_frame_index: int) -> str:
        """
        Build the prompt for the dialogue LLM from the historical information
        and the user's question.

        Args:
            history_info (Dict[str, Any]): Output of _load_history_before_index.
            user_question (str): The user's original question.
            current_frame_index (int): Frame index at which the user asked.

        Returns:
            str: The formatted prompt string.
        """
        # Build the historical frame-context string.
        frame_info_parts = []
        for i, frame in enumerate(history_info.get("frames", [])):
            parts = [f"帧 {i+1}:"]
            if isinstance(frame, dict):
                 if "theme_refinement" in frame: parts.append(f"  主题细化: {frame['theme_refinement']}")
                 if "story_development" in frame: parts.append(f"  内容/故事发展: {frame['story_development']}")
                 if "warning" in frame: parts.append(f"  注意: {frame['warning']}")
                 if "error" in frame: parts.append(f"  错误: {frame['error']}") # Include errors if any
            else:
                 parts.append("  (无效数据)")
            frame_info_parts.append("\n".join(parts))
        frame_context = "\n\n".join(frame_info_parts)

        # Build the historical character/subject context string.
        character_parts = []
        for i, subject in enumerate(history_info.get("history", [])):
             if isinstance(subject, dict):
                 parts = [f"历史主体/人物 {i+1}:"]
                 if "name" in subject: parts.append(f"  名称: {subject['name']}")
                 if "description" in subject: parts.append(f"  描述: {subject['description']}")
                 character_parts.append("\n".join(parts))
        character_context = "\n\n".join(character_parts) if character_parts else "无"


        # Build the overall summary context string.
        final_summary = history_info.get("final_summary", {})
        summary_context = f"""
视频整体总结:
教学目标与重点:
{final_summary.get('teaching_content', '未生成')}

故事梗概:
{final_summary.get('story_development', '未生成')}
"""
        if "error" in final_summary:
             summary_context += f"\n注意: 最终总结生成时遇到错误: {final_summary['error']}"


        # Construct the final prompt.
        prompt = f"""
你是一个友好的AI助手，正在帮助用户理解一个幼儿教学视频的内容。
用户当前暂停在视频的第 {current_frame_index + 1} 帧画面上进行提问。

请基于以下提供的历史信息和视频总结来回答用户的问题：

### 历史画面信息 (按时间顺序):
{frame_context if frame_context else "无"}

### 历史中出现的主体/人物信息:
{character_context}

### 视频整体总结:
{summary_context}

------------------------

用户问题: {user_question}

------------------------

请注意：
1.  你的回答对象是幼儿，请使用简单、易懂、友好的语言。
2.  结合用户当前所在的帧数（第 {current_frame_index + 1} 帧）以及之前的历史信息进行回答。
3.  如果历史信息中有相关的人物，请在回答中体现出来。
4.  如果信息不足或不确定，可以说明。
"""
        return prompt

    def ask(self, target_time: float, user_question: str) -> str:
        """
        Answer a user question at a given playback time using the history up
        to that point.

        Args:
            target_time (float): Playback time in seconds when the question
                was asked.
            user_question (str): The user's question.

        Returns:
            str: The LLM-generated answer, or a (Chinese) error message if
            any step fails.
        """
        logging.info(f"Received question '{user_question}' at time {target_time:.2f}s.")

        # 1. Find the corresponding frame index
        current_frame_index = self.get_frame_index_by_time(target_time)
        if current_frame_index == -1:
            return "错误：无法确定当前帧索引，可能是图像文件读取问题。"

        # 2. Load history up to that frame index
        history_info = self._load_history_before_index(current_frame_index)
        if not history_info:
            return "错误：无法加载必要的历史信息进行回答。请先运行完整分析。"

        # 3. Generate the prompt
        prompt = self._generate_dialogue_prompt(history_info, user_question, current_frame_index)
        logging.debug(f"Generated prompt for dialogue:\n{prompt}")
        system_prompt = "你是一个幼儿英语教学助手，正在回答小朋友关于视频的问题,注意你的语气要像一个幼儿老师一样,并且通俗易懂的回答问题。"
        # 4. Get response from LLM
        try:
            response = get_response(user_input=prompt, system_prompt=system_prompt)
            # Optional: Parse or clean the response if needed, but for dialogue, raw text is often fine.
            return response
        except Exception as e:
            logging.error(f"Error getting response from dialogue LLM: {e}")
            return f"抱歉，我在尝试回答您的问题时遇到了一个内部错误：{e}"


# --- Example Usage ---
if __name__ == "__main__":
    # VideoAnalysis derives its image input and analysis output directories
    # from the project config ('output.*' keys) plus the video name, so the
    # example only needs the video's file name — not explicit directories.
    # (The previous example passed image_dir/output_dir keyword arguments,
    # which do not exist on __init__ and raised TypeError.)
    EXAMPLE_VIDEO_NAME = "example_video.mp4"

    try:
        # 1. Initialize the analyzer (the '.mp4' suffix is stripped internally).
        analyzer = VideoAnalysis(video_name=EXAMPLE_VIDEO_NAME)

        # 2. Run the full analysis (will use cache if files exist).
        #    Set force_rerun_all=True to ignore cache and re-process everything.
        analysis_successful = analyzer.run_full_analysis(force_rerun_all=False)

        if not analysis_successful:
            print("\n视频分析未能成功完成。请检查日志了解详情。")
        else:
            print("\n视频分析完成（或从缓存加载）。")

            # 3. Perform a dialogue query if analysis was successful.
            target_time = 23.0  # Example time in seconds
            user_question = "那个黄色的小猫在做什么？" # Example question

            print(f"\n在时间点 {target_time}s 提问: '{user_question}'")
            dialogue_response = analyzer.ask(target_time, user_question)

            print("\nAI 回答:")
            print(dialogue_response)

            # Example: ask about an earlier moment in the video.
            target_time_early = 6.0
            user_question_early = "最开始看到了什么？"
            print(f"\n在时间点 {target_time_early}s 提问: '{user_question_early}'")
            dialogue_response_early = analyzer.ask(target_time_early, user_question_early)
            print("\nAI 回答:")
            print(dialogue_response_early)

    except ValueError as ve:
        # Raised by __init__ when the derived image directory is missing.
        print(f"\n初始化错误: {ve}")
    except Exception as e:
        print(f"\n发生意外错误: {e}")
        logging.exception("Unhandled exception in main block.")