"""
有声读物文本预处理服务
"""

import asyncio
import logging
import re
from collections import Counter
from dataclasses import dataclass
from datetime import datetime, timezone
from enum import Enum
from typing import List, Dict, Any, Optional, Tuple

import jieba
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize

from ...core.exceptions import ValidationError, BusinessLogicError
from ...core.logging import get_logger
from ...core.config import settings

logger = get_logger(__name__)


class TextPreprocessor:
    """Language-aware text preprocessor.

    Provides heuristic language detection, text cleaning, sentence/paragraph
    segmentation, keyword extraction and coarse structure detection for
    Chinese and English (plus rough Japanese/Korean detection).
    """

    # NLTK package name -> data category. Used to probe whether a resource
    # is already installed before downloading it. The previous code probed
    # everything under 'tokenizers/', which only exists for 'punkt', so all
    # other packages were re-downloaded on every initialization.
    _NLTK_PACKAGES = {
        'punkt': 'tokenizers',
        'stopwords': 'corpora',
        'averaged_perceptron_tagger': 'taggers',
        'maxent_ne_chunker': 'chunkers',
        'words': 'corpora',
        'wordnet': 'corpora',
    }

    def __init__(self):
        # Lazy-initialization flag; heavy setup happens in initialize().
        self._initialized = False
        self._language_models = {}

    async def initialize(self):
        """Download required NLTK data and configure jieba (idempotent).

        Raises:
            BusinessLogicError: if any setup step fails.
        """
        if self._initialized:
            return

        try:
            # Probe each package under its actual data category and only
            # download what is missing.
            for package, category in self._NLTK_PACKAGES.items():
                try:
                    nltk.data.find(f'{category}/{package}')
                except LookupError:
                    logger.info(f"Downloading NLTK package: {package}")
                    nltk.download(package, quiet=True)

            # Silence jieba's default DEBUG chatter.
            jieba.setLogLevel(logging.INFO)

            self._initialized = True
            logger.info("Text preprocessor initialized successfully")

        except Exception as e:
            logger.error(f"Failed to initialize text preprocessor: {e}")
            raise BusinessLogicError(f"Text preprocessor initialization failed: {str(e)}")

    def detect_language(self, text: str) -> str:
        """Heuristically detect the dominant language of *text*.

        Returns one of "zh-CN", "en-US", "ja-JP", "ko-KR" or "unknown".
        """
        if not text:
            return "unknown"

        # Ratio of CJK Unified Ideographs over all characters.
        chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', text))
        total_chars = len(text)
        chinese_ratio = chinese_chars / total_chars if total_chars > 0 else 0

        # Ratio of purely alphabetic words over whitespace-split tokens.
        english_words = len(re.findall(r'\b[a-zA-Z]+\b', text))
        english_ratio = english_words / len(text.split()) if text.split() else 0

        if chinese_ratio > 0.3:
            return "zh-CN"
        elif english_ratio > 0.5:
            return "en-US"
        # Fix: match the Hiragana (U+3040-309F) and Katakana (U+30A0-30FF)
        # Unicode blocks. The old pattern was a literal character class of
        # the seven kana spelling "ひらがなカタカナ" and missed almost all
        # Japanese text.
        elif re.search(r'[\u3040-\u309f\u30a0-\u30ff]', text):
            return "ja-JP"
        elif re.search(r'[가-힣]', text):
            return "ko-KR"
        else:
            return "unknown"

    def clean_text(self, text: str, language: str = None) -> str:
        """Normalize whitespace and strip language-inappropriate characters.

        Punctuation relevant to the given language is preserved; with no
        language given, only whitespace normalization is applied.
        """
        if not text:
            return ""

        # Collapse all runs of whitespace (including newlines) to one space.
        text = re.sub(r'\s+', ' ', text)

        # Remove characters outside the expected alphabet (punctuation kept).
        if language == "zh-CN":
            # Chinese text: keep CJK blocks, word chars and CJK punctuation.
            text = re.sub(r'[^\u4e00-\u9fff\u3400-\u4dbf\uf900-\ufaff\w\s，。！？；：""''（）【】《》—\-]', '', text)
        elif language and language.startswith("en"):
            # English text: keep word chars and ASCII punctuation.
            text = re.sub(r'[^\w\s.,!?;:"\'()\[\]\-]', '', text)

        # Strip leading/trailing spaces per line (no-op after the collapse
        # above, kept for safety if the collapse regex ever changes).
        text = '\n'.join(line.strip() for line in text.split('\n'))

        return text.strip()

    def segment_sentences(self, text: str, language: str = None) -> List[str]:
        """Split *text* into sentences; falls back to [text] on failure."""
        if not text:
            return []

        language = language or self.detect_language(text)

        try:
            if language == "zh-CN":
                # Chinese: split on CJK sentence terminators.
                sentences = re.split(r'[。！？；]\s*', text)
                sentences = [s.strip() for s in sentences if s.strip()]
            else:
                # Other languages: delegate to NLTK.
                sentences = sent_tokenize(text)

            return sentences

        except Exception as e:
            logger.error(f"Sentence segmentation failed: {e}")
            # Degrade gracefully: treat the whole text as one sentence.
            return [text]

    def segment_paragraphs(self, text: str, language: str = None) -> List[str]:
        """Split *text* into paragraphs on blank-line boundaries."""
        if not text:
            return []

        # Paragraphs are separated by one or more blank lines.
        paragraphs = re.split(r'\n\s*\n', text)
        paragraphs = [p.strip() for p in paragraphs if p.strip()]

        return paragraphs

    def extract_keywords(self, text: str, language: str = None, top_k: int = 10) -> List[str]:
        """Return the *top_k* most frequent content words of *text*."""
        if not text:
            return []

        language = language or self.detect_language(text)

        try:
            if language == "zh-CN":
                # Chinese: jieba word segmentation, drop single-char tokens.
                words = jieba.cut(text)
                words = [word for word in words if len(word) > 1]
            else:
                # English: NLTK tokenization on lowercased text.
                words = word_tokenize(text.lower())

                # Drop stopwords and non-alphabetic tokens. If the stopword
                # corpus is missing, keep all tokens rather than fail.
                try:
                    stop_words = set(stopwords.words('english'))
                    words = [word for word in words if word not in stop_words and word.isalpha()]
                except LookupError:
                    pass

            # Rank by raw term frequency.
            word_freq = Counter(words)
            keywords = [word for word, freq in word_freq.most_common(top_k)]

            return keywords

        except Exception as e:
            logger.error(f"Keyword extraction failed: {e}")
            return []

    def detect_structure(self, text: str) -> Dict[str, Any]:
        """Detect coarse structural features of *text*.

        Returns a dict with chapter/section/dialogue/list flags, an
        estimated reading time (minutes) and a 0-10 complexity score.
        """
        if not text:
            return {}

        structure = {
            "has_chapters": False,
            "has_sections": False,
            "has_dialogue": False,
            "has_list": False,
            "estimated_reading_time": 0,
            "complexity_score": 0
        }

        try:
            # Chapter headings (Chinese and English styles).
            chapter_patterns = [
                r'第[一二三四五六七八九十\d]+章',
                r'Chapter\s+\d+',
                r'^\d+\.',
                r'^[一二三四五六七八九十]+、'
            ]

            for pattern in chapter_patterns:
                if re.search(pattern, text, re.MULTILINE):
                    structure["has_chapters"] = True
                    break

            # Numbered section headings.
            section_patterns = [
                r'^\d+\.\d+',
                r'^[一二三四五六七八九十]+\.',
                r'^§\s*\d+'
            ]

            for pattern in section_patterns:
                if re.search(pattern, text, re.MULTILINE):
                    structure["has_sections"] = True
                    break

            # Dialogue: text bracketed by quotes. Fix: the closing class
            # previously read `」』』` (typo) and omitted `『`.
            if re.search(r'["「」『』].*?["「」『』]', text):
                structure["has_dialogue"] = True

            # Bullet lists.
            if re.search(r'^[-*•]\s+', text, re.MULTILINE):
                structure["has_list"] = True

            # Reading time in minutes, based on character/word count.
            char_count = len(text)
            if self.detect_language(text) == "zh-CN":
                # Chinese: ~300 characters per minute.
                structure["estimated_reading_time"] = char_count / 300
            else:
                # English: ~200 words per minute.
                word_count = len(text.split())
                structure["estimated_reading_time"] = word_count / 200

            # Complexity: average sentence length, capped at 10.
            sentences = self.segment_sentences(text)
            if sentences:
                avg_sentence_length = len(text) / len(sentences)
                structure["complexity_score"] = min(10, avg_sentence_length / 20)

        except Exception as e:
            logger.error(f"Structure detection failed: {e}")

        return structure


@dataclass
class ChapterSegment:
    """One ordered text segment of an audio book.

    Field order is part of the public interface (positional construction).
    """
    title: str                 # display title of this segment
    content: str               # cleaned text to be narrated
    order: int                 # zero-based position within the book
    start_position: int        # character offset where the slice starts
    end_position: int          # character offset where the slice ends
    estimated_duration: float  # predicted narration length, in seconds
    word_count: int            # words (en) or characters (zh) in content
    language: str              # language tag, e.g. "zh-CN" or "en-US"


@dataclass
class AudioBookStructure:
    """有声读物结构"""
    title: str
    author: Optional[str]
    language: str
    total_segments: int
    estimated_total_duration: float
    structure_info: Dict[str, Any]
    segments: List[ChapterSegment]


class AudioBookProcessor:
    """有声读物处理器"""
    
    def __init__(self):
        self.text_processor = TextPreprocessor()
        self._initialized = False
    
    async def initialize(self):
        """初始化处理器"""
        if self._initialized:
            return
        
        await self.text_processor.initialize()
        self._initialized = True
        logger.info("Audio book processor initialized")
    
    def extract_metadata(self, text: str) -> Dict[str, Any]:
        """提取元数据"""
        metadata = {
            "title": None,
            "author": None,
            "description": None,
            "language": None,
            "creation_date": datetime.utcnow().isoformat()
        }
        
        if not text:
            return metadata
        
        try:
            lines = text.split('\n')[:10]  # 只检查前10行
            
            for line in lines:
                line = line.strip()
                if not line:
                    continue
                
                # 尝试识别标题
                if not metadata["title"]:
                    title_patterns = [
                        r'^《(.+)》',
                        r'^(.+)$'  # 第一行作为标题
                    ]
                    
                    for pattern in title_patterns:
                        match = re.search(pattern, line)
                        if match:
                            metadata["title"] = match.group(1).strip()
                            break
                
                # 尝试识别作者
                if not metadata["author"] and "作者" in line:
                    author_match = re.search(r'作者[：:]\s*(.+)', line)
                    if author_match:
                        metadata["author"] = author_match.group(1).strip()
                
                # 尝试识别描述
                if metadata["title"] and not metadata["description"]:
                    if len(line) > 50 and not re.search(r'第.*章|Chapter', line):
                        metadata["description"] = line
            
            # 检测语言
            metadata["language"] = self.text_processor.detect_language(text)
            
        except Exception as e:
            logger.error(f"Metadata extraction failed: {e}")
        
        return metadata
    
    async def analyze_structure(self, text: str) -> Dict[str, Any]:
        """分析文本结构"""
        if not text:
            return {}
        
        try:
            structure = self.text_processor.detect_structure(text)
            
            # 分析章节分布
            chapters = self._detect_chapters(text)
            structure["chapters_info"] = {
                "count": len(chapters),
                "avg_length": sum(len(ch.get("content", "")) for ch in chapters) / len(chapters) if chapters else 0,
                "titles": [ch.get("title", "") for ch in chapters[:5]]  # 前5个章节标题
            }
            
            # 分析内容类型
            content_type = self._detect_content_type(text)
            structure["content_type"] = content_type
            
            return structure
            
        except Exception as e:
            logger.error(f"Structure analysis failed: {e}")
            return {}
    
    def _detect_chapters(self, text: str) -> List[Dict[str, Any]]:
        """检测章节"""
        chapters = []
        
        # 章节标题模式
        chapter_patterns = [
            r'^(第[一二三四五六七八九十\d]+章)\s*(.*)',
            r'^(Chapter\s+\d+)\s*(.*)',
            r'^(\d+\.)\s*(.*)',
            r'^([一二三四五六七八九十]+、)\s*(.*)'
        ]
        
        lines = text.split('\n')
        current_chapter = {"title": None, "content": [], "start_line": 0}
        chapter_count = 0
        
        for i, line in enumerate(lines):
            line = line.strip()
            
            # 检查是否为章节标题
            for pattern in chapter_patterns:
                match = re.search(pattern, line)
                if match:
                    # 保存上一章
                    if current_chapter["title"]:
                        current_chapter["content"] = '\n'.join(current_chapter["content"])
                        chapters.append({
                            "order": chapter_count,
                            "title": current_chapter["title"],
                            "content": current_chapter["content"]
                        })
                        chapter_count += 1
                    
                    # 开始新章
                    current_chapter = {
                        "title": match.group(1),
                        "subtitle": match.group(2).strip() if len(match.groups()) > 1 else None,
                        "content": [],
                        "start_line": i
                    }
                    break
            else:
                # 非章节标题，添加到当前章节内容
                if current_chapter["title"]:
                    current_chapter["content"].append(line)
                elif line:  # 第一章之前的内容
                    pass
        
        # 保存最后一章
        if current_chapter["title"]:
            current_chapter["content"] = '\n'.join(current_chapter["content"])
            chapters.append({
                "order": chapter_count,
                "title": current_chapter["title"],
                "content": current_chapter["content"]
            })
        
        return chapters
    
    def _detect_content_type(self, text: str) -> str:
        """检测内容类型"""
        if not text:
            return "unknown"
        
        # 检测对话比例
        dialogue_chars = len(re.findall(r'["「」『』]', text))
        dialogue_ratio = dialogue_chars / len(text) if text else 0
        
        # 检测说明文特征
        expository_patterns = [
            r'首先|其次|再次|最后',
            r'因为|所以|但是|然而',
            r'根据|按照|基于'
        ]
        
        expository_count = sum(1 for pattern in expository_patterns if re.search(pattern, text))
        
        # 检测叙事文特征
        narrative_patterns = [
            r'说|想|觉得|感觉',
            r'过去|现在|将来',
            r'今天|昨天|明天'
        ]
        
        narrative_count = sum(1 for pattern in narrative_patterns if re.search(pattern, text))
        
        if dialogue_ratio > 0.2:
            return "dialogue"
        elif expository_count > narrative_count:
            return "expository"
        elif narrative_count > 3:
            return "narrative"
        else:
            return "mixed"
    
    async def create_segments(
        self,
        text: str,
        max_segment_length: int = 5000,
        preferred_segment_length: int = 2000,
        language: str = None,
        preserve_paragraphs: bool = True
    ) -> AudioBookStructure:
        """创建音频片段"""
        if not text:
            raise ValidationError("Text content cannot be empty")
        
        await self.initialize()
        
        try:
            # 提取元数据
            metadata = self.extract_metadata(text)
            language = language or metadata.get("language", "zh-CN")
            
            # 分析结构
            structure_info = await self.analyze_structure(text)
            
            # 检测章节
            detected_chapters = self._detect_chapters(text)
            
            segments = []
            total_order = 0
            
            if detected_chapters and len(detected_chapters) > 1:
                # 基于检测到的章节创建片段
                for chapter in detected_chapters:
                    chapter_segments = self._create_chapter_segments(
                        chapter["content"],
                        chapter["title"],
                        total_order,
                        max_segment_length,
                        preferred_segment_length,
                        language,
                        preserve_paragraphs
                    )
                    segments.extend(chapter_segments)
                    total_order += len(chapter_segments)
            else:
                # 没有明确的章节，按段落分割
                segments = self._create_content_segments(
                    text,
                    None,
                    total_order,
                    max_segment_length,
                    preferred_segment_length,
                    language,
                    preserve_paragraphs
                )
            
            # 计算总时长估算
            total_duration = sum(segment.estimated_duration for segment in segments)
            
            structure = AudioBookStructure(
                title=metadata.get("title", "未命名有声读物"),
                author=metadata.get("author"),
                language=language,
                total_segments=len(segments),
                estimated_total_duration=total_duration,
                structure_info=structure_info,
                segments=segments
            )
            
            logger.info(f"Created {len(segments)} segments for audio book")
            return structure
            
        except Exception as e:
            logger.error(f"Failed to create audio book segments: {e}")
            raise BusinessLogicError(f"Segment creation failed: {str(e)}")
    
    def _create_chapter_segments(
        self,
        chapter_content: str,
        chapter_title: str,
        start_order: int,
        max_segment_length: int,
        preferred_segment_length: int,
        language: str,
        preserve_paragraphs: bool
    ) -> List[ChapterSegment]:
        """为章节创建片段"""
        segments = []
        
        if not chapter_content.strip():
            return segments
        
        # 清理文本
        cleaned_content = self.text_processor.clean_text(chapter_content, language)
        
        if len(cleaned_content) <= max_segment_length:
            # 章节内容较短，作为一个片段
            segment = ChapterSegment(
                title=chapter_title,
                content=cleaned_content,
                order=start_order,
                start_position=0,
                end_position=len(cleaned_content),
                estimated_duration=self._estimate_duration(cleaned_content, language),
                word_count=len(cleaned_content.split()) if language.startswith("en") else len(cleaned_content),
                language=language
            )
            segments.append(segment)
        else:
            # 章节内容较长，需要分割
            if preserve_paragraphs:
                segments = self._segment_by_paragraphs(
                    cleaned_content,
                    chapter_title,
                    start_order,
                    max_segment_length,
                    preferred_segment_length,
                    language
                )
            else:
                segments = self._segment_by_length(
                    cleaned_content,
                    chapter_title,
                    start_order,
                    max_segment_length,
                    preferred_segment_length,
                    language
                )
        
        return segments
    
    def _create_content_segments(
        self,
        content: str,
        title: Optional[str],
        start_order: int,
        max_segment_length: int,
        preferred_segment_length: int,
        language: str,
        preserve_paragraphs: bool
    ) -> List[ChapterSegment]:
        """为内容创建片段"""
        title = title or "正文"
        
        if preserve_paragraphs:
            return self._segment_by_paragraphs(
                content,
                title,
                start_order,
                max_segment_length,
                preferred_segment_length,
                language
            )
        else:
            return self._segment_by_length(
                content,
                title,
                start_order,
                max_segment_length,
                preferred_segment_length,
                language
            )
    
    def _segment_by_paragraphs(
        self,
        content: str,
        base_title: str,
        start_order: int,
        max_segment_length: int,
        preferred_segment_length: int,
        language: str
    ) -> List[ChapterSegment]:
        """按段落分割"""
        segments = []
        paragraphs = self.text_processor.segment_paragraphs(content, language)
        
        current_segment_content = ""
        current_order = start_order
        segment_index = 1
        
        for paragraph in paragraphs:
            if len(current_segment_content + "\n\n" + paragraph) <= max_segment_length:
                # 添加到当前片段
                if current_segment_content:
                    current_segment_content += "\n\n" + paragraph
                else:
                    current_segment_content = paragraph
            else:
                # 当前片段已满，保存并开始新片段
                if current_segment_content:
                    segment = self._create_segment(
                        current_segment_content,
                        f"{base_title} - 第{segment_index}段",
                        current_order,
                        language
                    )
                    segments.append(segment)
                    current_order += 1
                    segment_index += 1
                
                current_segment_content = paragraph
        
        # 保存最后一个片段
        if current_segment_content:
            segment = self._create_segment(
                current_segment_content,
                f"{base_title} - 第{segment_index}段",
                current_order,
                language
            )
            segments.append(segment)
        
        return segments
    
    def _segment_by_length(
        self,
        content: str,
        base_title: str,
        start_order: int,
        max_segment_length: int,
        preferred_segment_length: int,
        language: str
    ) -> List[ChapterSegment]:
        """按长度分割"""
        segments = []
        current_order = start_order
        segment_index = 1
        
        position = 0
        while position < len(content):
            # 确定片段长度
            remaining_length = len(content) - position
            segment_length = min(preferred_segment_length, remaining_length)
            
            # 如果剩余内容很少，直接取完
            if remaining_length <= max_segment_length:
                segment_length = remaining_length
            
            # 提取片段内容
            segment_content = content[position:position + segment_length]
            
            # 调整到句子边界
            if position + segment_length < len(content):
                sentence_end = segment_content.rfind('。')
                if sentence_end > len(segment_content) * 0.7:  # 如果句子的结尾位置合理
                    segment_content = segment_content[:sentence_end + 1]
                    segment_length = len(segment_content)
            
            # 创建片段
            if segment_content.strip():
                segment = self._create_segment(
                    segment_content.strip(),
                    f"{base_title} - 第{segment_index}段",
                    current_order,
                    language
                )
                segments.append(segment)
                current_order += 1
                segment_index += 1
            
            position += segment_length
        
        return segments
    
    def _create_segment(
        self,
        content: str,
        title: str,
        order: int,
        language: str
    ) -> ChapterSegment:
        """创建片段"""
        return ChapterSegment(
            title=title,
            content=content,
            order=order,
            start_position=0,  # 简化处理
            end_position=len(content),
            estimated_duration=self._estimate_duration(content, language),
            word_count=len(content.split()) if language.startswith("en") else len(content),
            language=language
        )
    
    def _estimate_duration(self, text: str, language: str) -> float:
        """估算音频时长"""
        if not text:
            return 0.0
        
        if language == "zh-CN":
            # 中文：每分钟200字
            chars = len(text)
            duration = (chars / 200) * 60  # 秒
        else:
            # 英文：每分钟150词
            words = len(text.split())
            duration = (words / 150) * 60  # 秒
        
        # 添加一些缓冲时间
        return duration * 1.1


# Module-level singleton processor instance shared by the service.
audio_book_processor = AudioBookProcessor()
