"""
封装所有与 AI 模型交互的逻辑
"""

import re
import json
import logging
import moviepy.editor as mp
from . import config
from .config import LLM_CLIENT
from .cache_service import cache_result, CacheKeys

@cache_result(CacheKeys.LLM_RESPONSE, ttl=1800)  # cache responses for 30 minutes
def call_llm(prompt: str, is_json: bool = False) -> str:
    """Call the large language model (LLM) and return its response text.

    Args:
        prompt: User prompt sent as a single chat message.
        is_json: When True, extract the payload of a fenced ```json block
            from the response, falling back to the raw content if no fence
            is found.

    Returns:
        The model's response text (or the extracted JSON payload), or ""
        when no client is configured or the request fails.
    """
    if not LLM_CLIENT:
        return ""
    try:
        messages = [{"role": "user", "content": prompt}]
        response = LLM_CLIENT.chat.completions.create(
            model="deepseek/deepseek-r1-0528:free", messages=messages
        )
        content = response.choices[0].message.content.strip()
        if is_json:
            # Accept either ```json or a bare ``` fence, with any whitespace
            # (including CRLF) after the opening fence. The previous pattern
            # required a literal "\n" right after "```json", which missed
            # responses like ```json{...}``` and Windows line endings, and it
            # captured a trailing newline before the closing fence.
            match = re.search(r'```(?:json)?\s*(.*?)\s*```', content, re.DOTALL)
            return match.group(1) if match else content
        return content
    except Exception as e:
        logging.error(f"调用 LLM 时出错: {e}")
        return ""

@cache_result(CacheKeys.CHAPTERS, ttl=3600)  # cache for 1 hour
def generate_chapters(full_text: str, srt_path: str, min_chapters: int, max_chapters: int, min_chapter_duration: int) -> str:
    """Use the LLM to generate a YouTube-formatted chapter list for a video.

    Args:
        full_text: Full transcript of the video.
        srt_path: Path to the SRT subtitle file used as a timing reference.
        min_chapters: Minimum number of chapters to request.
        max_chapters: Maximum number of chapters to request.
        min_chapter_duration: Minimum length of each chapter, in seconds.

    Returns:
        Raw chapter text from the LLM ("HH:MM:SS Title" lines), or "" when
        the model produced nothing.
    """
    logging.info("正在请求 LLM 生成章节信息...")
    # A missing SRT file is non-fatal: the model can still chapter the video
    # from the transcript alone (same policy as generate_ai_metadata applies
    # when reading the SRT for knowledge cards).
    srt_content = ""
    try:
        with open(srt_path, 'r', encoding='utf-8') as f:
            srt_content = f.read()
    except FileNotFoundError:
        logging.warning(f"SRT文件未找到: {srt_path}，章节定时可能不精确。")

    prompt = f"""Analyze the following video transcript and its SRT content to generate a list of video chapters for YouTube. Follow these rules strictly:
1.  Create between {min_chapters} and {max_chapters} chapters.
2.  Each chapter must be at least {min_chapter_duration} seconds long.
3.  The first chapter MUST start at exactly 00:00.
4.  Format each chapter as `HH:MM:SS Chapter Title` or `MM:SS Chapter Title` on a new line.
5.  Do NOT include any introductory, concluding, or explanatory text. Only output the chapter list.

Example:
00:00 Introduction
01:23 Key Feature 1
03:45 Demonstration
05:10 Conclusion

Video Transcript:
{full_text}

SRT Content (for timing reference):
{srt_content}

Chapters:"""

    chapters_text = call_llm(prompt)
    if chapters_text:
        logging.info("章节信息已从 LLM 成功获取。")
    else:
        logging.warning("LLM 未能生成章节信息。")
    # call_llm always returns a str, so this is "" on failure.
    return chapters_text

@cache_result(CacheKeys.SUMMARY_KEYWORDS, ttl=3600)  # cache for 1 hour
def generate_summary_and_keywords(full_text: str) -> dict:
    """Use the LLM to generate a summary and keywords for the transcript.

    Args:
        full_text: Full transcript of the video.

    Returns:
        Dict with keys "summary" (str) and "keywords" (list); both empty on
        any failure, so callers can always use dict access safely.
    """
    logging.info("正在请求 LLM 生成摘要和关键词...")
    prompt = f"""Based on the following video transcript, generate a concise summary (around 100-150 words) and 5-10 relevant keywords. Return the result as a valid JSON object in a ```json block. The format should be: \n{{"summary": "Your concise summary here.", "keywords": ["keyword1", "keyword2", "keyword3"]}}.\n\nVideo Transcript:\n{full_text}\n\nJSON:"""

    result_json_str = call_llm(prompt, is_json=True)

    if not result_json_str:
        logging.warning("LLM 未返回有效的摘要和关键词内容。")
        return {"summary": "", "keywords": []}

    try:
        result = json.loads(result_json_str)
    except (json.JSONDecodeError, TypeError) as e:
        logging.warning(f"未能解析摘要和关键词JSON: {e}")
        return {"summary": "", "keywords": []}

    # The model occasionally returns a bare JSON array or string despite the
    # prompt; a non-dict here would crash callers that do result.get(...).
    if not isinstance(result, dict):
        logging.warning("摘要和关键词JSON不是对象，已丢弃。")
        return {"summary": "", "keywords": []}

    logging.info("摘要和关键词已生成。")
    return result

def generate_ai_metadata(full_text: str, original_srt_path: str, original_video_path: str, skip_chapter_generation: bool = False):
    """生成所有AI相关的元数据，包括带时间戳的知识卡片，并对章节进行严格验证。"""
    knowledge_cards = []
    if not config.SKIP_KNOWLEDGE_CARD_EXTRACTION:
        logging.info("正在请求 LLM 提取带时间戳的知识卡片...")
        
        srt_content = ""
        try:
            with open(original_srt_path, 'r', encoding='utf-8') as f:
                srt_content = f.read()
        except FileNotFoundError:
            logging.warning(f"SRT文件未找到: {original_srt_path}，无法为知识卡片提供精确定时。")

        knowledge_prompt = f"""From the following video transcript, extract the 3-5 most important key concepts. For each concept, provide a brief, one-sentence definition and its exact start time in seconds.

Use the provided SRT content for precise timing. The 'start_time' must be a float number representing the moment the concept is first mentioned.

Return the result as a valid JSON object in a ```json block. The format must be an array of objects, like this example:
[
  {{
    "concept": "Statefulness",
    "definition": "The ability of a system to remember previous interactions and events.",
    "start_time": 49.5
  }},
  {{
    "concept": "Toolchain",
    "definition": "The set of programming tools used to create a software product.",
    "start_time": 111.2
  }}
]

Transcript:
{full_text}

SRT Content (for timing reference):
{srt_content}
"""
        knowledge_json_str = call_llm(knowledge_prompt, is_json=True)
        try:
            knowledge_cards = json.loads(knowledge_json_str)
            logging.info("已成功提取带时间戳的知识卡片。")
        except (json.JSONDecodeError, TypeError):
            knowledge_cards = []
            logging.warning("未能解析知识卡片JSON，将跳过此步骤。")
    else:
        logging.info("已跳过知识卡片提取。")

    chapters = ""
    if not skip_chapter_generation:
        video_duration = mp.VideoFileClip(original_video_path).duration
        
        if video_duration < 5 * 60: 
            min_chapters, max_chapters, min_chapter_duration = 3, 5, 20
        elif video_duration < 15 * 60: 
            min_chapters, max_chapters, min_chapter_duration = 5, 10, 30
        else: 
            min_chapters, max_chapters, min_chapter_duration = 8, 15, 45

        chapters_raw = generate_chapters(full_text, original_srt_path, min_chapters, max_chapters, min_chapter_duration)
        
        if chapters_raw:
            logging.info(f"LLM 原始章节信息:\n---\n{chapters_raw}\n---")
            chapter_lines = []
            time_format_re = r'^(?:\d{2}:)?\d{2}:\d{2} .+'
            for line in chapters_raw.split('\n'):
                if re.match(time_format_re, line.strip()):
                    chapter_lines.append(line.strip())
            
            is_valid = True
            if len(chapter_lines) < 3:
                logging.warning("章节数量少于3个，不符合YouTube要求。")
                is_valid = False
            if is_valid and not chapter_lines[0].startswith("00:00"):
                logging.warning("第一个章节不从 00:00 开始，不符合YouTube要求。")
                is_valid = False
            
            if is_valid:
                chapters = "\n".join(chapter_lines)
                logging.info(f"清理并验证后的章节信息:\n---\n{chapters}\n---")
            else:
                logging.warning("生成的章节未通过验证，将被丢弃。")
                chapters = ""
    else:
        logging.info("已跳过章节信息生成。")

    video_summary = ""
    video_keywords = []
    if not config.SKIP_SUMMARY_AND_KEYWORDS_EXTRACTION:
        summary_and_keywords_result = generate_summary_and_keywords(full_text)
        video_summary = summary_and_keywords_result.get("summary", "")
        video_keywords = summary_and_keywords_result.get("keywords", [])
    else:
        logging.info("已跳过摘要和关键词提取。")

    return knowledge_cards, chapters, video_summary, video_keywords
