"""
AI摘要模块
使用Ollama本地大模型生成智能摘要
"""

import logging
import re
from typing import Dict, Any, List, Optional, Callable, Union
from datetime import timedelta

from .ollama_client import OllamaClient, get_recommended_model
from prompts.prompt_loader import get_prompt


class VideoSummarizer:
    """视频摘要生成器"""

    def __init__(self, ollama_client: Optional[OllamaClient] = None, model_name: Optional[str] = None):
        """Create a summarizer backed by an Ollama client.

        Args:
            ollama_client: client to reuse; a fresh OllamaClient is created when None.
            model_name: model to use; falls back to get_recommended_model() when None.

        Raises:
            ConnectionError: if the Ollama service is not reachable.
        """
        self.logger = logging.getLogger(__name__)

        if ollama_client is None:
            ollama_client = OllamaClient()
        self.ollama_client = ollama_client

        if model_name is None:
            model_name = get_recommended_model()
        self.model_name = model_name

        # Fail fast when the Ollama service is down.
        if not self.ollama_client.is_available():
            raise ConnectionError("Ollama服务不可用，请确保Ollama已启动")

        # A missing model is only a warning; it may be pulled on demand later.
        if not self.ollama_client.model_exists(self.model_name):
            self.logger.warning(f"模型 {self.model_name} 不存在，将尝试下载")

    def generate_summary(self, transcript: str, style: str = "detailed",
                         language: str = "zh", custom_prompt: Optional[str] = None,
                         progress_callback: Optional[Callable[[int, str], None]] = None) -> Dict[str, Any]:
        """Generate a structured summary report for a video transcript.

        Args:
            transcript: raw transcript text.
            style: summary style key passed to the prompt loader.
            language: output language code (e.g. "zh", "en").
            custom_prompt: optional prompt template containing a ``{transcript}``
                placeholder; overrides the style/language template when given.
            progress_callback: optional ``callback(percent, message)`` for UI updates.

        Returns:
            Dict with summary, keywords, key_points, chapters, style, language,
            word_count and summary_ratio.

        Raises:
            Exception: re-raised (after logging) from any failing generation step.
        """
        try:
            if progress_callback:
                progress_callback(10, "准备生成摘要...")

            # Normalize the transcript before prompting the model.
            processed_transcript = self._preprocess_transcript(transcript)

            if progress_callback:
                progress_callback(20, "分析文本内容...")

            # Build the summary prompt, preferring a caller-supplied template.
            summary_prompt_template = custom_prompt or get_prompt('summary', style, language)
            summary_prompt = summary_prompt_template.format(transcript=processed_transcript)
            summary = self._generate_text(summary_prompt)

            if progress_callback:
                progress_callback(50, "提取关键词...")

            keywords = self._extract_keywords(processed_transcript, language)

            if progress_callback:
                progress_callback(70, "划分章节...")

            chapters = self._divide_chapters(processed_transcript, language)

            if progress_callback:
                progress_callback(90, "生成最终报告...")

            key_points = self._extract_key_points(summary, language)

            # BUG FIX: a whitespace-only transcript is truthy but splits to an
            # empty list, so the old `if transcript` guard still allowed a
            # ZeroDivisionError. Guard on the word count instead.
            transcript_words = len(transcript.split())
            summary_words = len(summary.split())

            result = {
                "summary": summary,
                "keywords": keywords,
                "key_points": key_points,
                "chapters": chapters,
                "style": style,
                "language": language,
                "word_count": transcript_words,
                "summary_ratio": summary_words / transcript_words if transcript_words else 0
            }

            if progress_callback:
                progress_callback(100, "摘要生成完成!")

            return result

        except Exception as e:
            self.logger.error(f"生成摘要失败: {e}")
            raise

    def _preprocess_transcript(self, transcript: str) -> str:
        """预处理转录文本"""
        # 移除多余的空白字符
        text = re.sub(r'\s+', ' ', transcript.strip())

        # 修复常见的转录错误
        text = re.sub(r'(\w)\s+(\w)', r'\1\2', text)  # 修复单词间的多余空格
        text = re.sub(r'([。！？])\s*', r'\1\n', text)  # 在句号后添加换行

        return text

    def _generate_text(self, prompt: str) -> str:
        """生成文本"""
        try:
            response = self.ollama_client.generate(
                model=self.model_name,
                prompt=prompt,
                temperature=0.7,
                max_tokens=2000
            )

            if "error" in response:
                raise Exception(response["error"])

            return response.get("response", "").strip()

        except Exception as e:
            self.logger.error(f"文本生成失败: {e}")
            raise

    def _extract_keywords(self, text: str, language: str) -> List[str]:
        """Ask the model for comma-separated keywords.

        Returns at most 15 keywords; an empty list if generation fails.
        """
        template = get_prompt('keywords', 'general', language)
        # Only the first 1000 characters are sent to keep the prompt small.
        rendered = template.format(text=text[:1000])
        try:
            raw = self._generate_text(rendered)
        except Exception as e:
            self.logger.error(f"关键词提取失败: {e}")
            return []
        stripped = (part.strip() for part in raw.split(','))
        keywords = [kw for kw in stripped if kw]
        # Cap the number of keywords returned.
        return keywords[:15]

    def _extract_key_points(self, summary: str, language: str) -> List[str]:
        """Turn the generated summary into a list of bullet-point strings.

        Returns an empty list if text generation fails.
        """
        template = get_prompt('others', 'key_points', language)
        rendered = template.format(summary=summary)
        try:
            raw = self._generate_text(rendered)
        except Exception as e:
            self.logger.error(f"要点提取失败: {e}")
            return []
        lines = [ln.strip() for ln in raw.split('\n') if ln.strip()]
        # Drop leading bullet markers / numbering from each line.
        return [ln.lstrip('•-*1234567890. ') for ln in lines]

    def _divide_chapters(self, transcript: str, language: str) -> List[Dict[str, Any]]:
        """智能章节划分"""
        try:
            # 简单的章节划分逻辑（基于段落和时间）
            paragraphs = transcript.split('\n')
            chapters = []
            current_chapter = ""
            chapter_count = 1

            for i, paragraph in enumerate(paragraphs):
                if len(paragraph.strip()) > 0:
                    current_chapter += paragraph + " "

                    # 每500字左右划分一个章节
                    if len(current_chapter) > 500 or i == len(paragraphs) - 1:
                        if current_chapter.strip():
                            # 生成章节标题
                            title = self._generate_chapter_title(current_chapter, language)
                            chapters.append({
                                "chapter": chapter_count,
                                "title": title,
                                "content": current_chapter.strip(),
                                "word_count": len(current_chapter.split()),
                                "start_time": (chapter_count - 1) * 300,  # 假设每章节5分钟
                                "end_time": chapter_count * 300
                            })
                            chapter_count += 1
                            current_chapter = ""

            return chapters

        except Exception as e:
            self.logger.error(f"章节划分失败: {e}")
            return []

    def _generate_chapter_title(self, content: str, language: str) -> str:
        """生成章节标题"""
        try:
            prompt_template = get_prompt('others', 'chapter_title', language)
            prompt = prompt_template.format(content=content[:200])
            title = self._generate_text(prompt)
            return title.strip().replace('"', '').replace("'", "")[:20]
        except Exception as e:
            self.logger.error(f"章节标题生成失败: {e}")
            return f"第{len(self._divide_chapters('', ''))+1}章"

    def evaluate_summary_quality(self, summary: str, original_text: str) -> Dict[str, Any]:
        """评估摘要质量"""
        try:
            # 基本指标
            summary_length = len(summary.split())
            original_length = len(original_text.split())
            compression_ratio = summary_length / original_length if original_length > 0 else 0

            # 内容覆盖度评估
            coverage_score = self._calculate_coverage_score(summary, original_text)

            # 可读性评估
            readability_score = self._calculate_readability_score(summary)

            return {
                "compression_ratio": compression_ratio,
                "coverage_score": coverage_score,
                "readability_score": readability_score,
                "summary_length": summary_length,
                "original_length": original_length,
                "quality_rating": self._calculate_overall_quality(
                    compression_ratio, coverage_score, readability_score
                )
            }

        except Exception as e:
            self.logger.error(f"摘要质量评估失败: {e}")
            return {}

    def _calculate_coverage_score(self, summary: str, original: str) -> float:
        """计算内容覆盖度分数"""
        # 简单的关键词覆盖度计算
        summary_words = set(summary.lower().split())
        original_words = set(original.lower().split())

        if not original_words:
            return 0.0

        common_words = summary_words.intersection(original_words)
        return len(common_words) / len(original_words)

    def _calculate_readability_score(self, text: str) -> float:
        """计算可读性分数"""
        # 简单的可读性评估
        sentences = len(re.split(r'[.!?]', text))
        words = len(text.split())

        if sentences == 0:
            return 0.0

        avg_sentence_length = words / sentences

        # 理想句子长度为15-20词
        if 15 <= avg_sentence_length <= 20:
            return 1.0
        elif 10 <= avg_sentence_length <= 25:
            return 0.8
        else:
            return 0.6

    def _calculate_overall_quality(self, compression: float, coverage: float, readability: float) -> str:
        """计算总体质量评级"""
        score = (compression * 0.3 + coverage * 0.4 + readability * 0.3)

        if score >= 0.8:
            return "优秀"
        elif score >= 0.6:
            return "良好"
        elif score >= 0.4:
            return "一般"
        else:
            return "需要改进"