import logging
import re
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, Optional, List, Tuple

import numpy as np
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import pipeline

from utils.logger import Logger
from utils.exceptions import ToolError

# Module-level logger.  The original called ``get_logger`` which is never
# imported (only ``Logger`` is) and would raise NameError at import time;
# the stdlib logging module provides the equivalent named-logger factory.
logger = logging.getLogger("segment_analyzer")


class SegmentAnalyzer:
    """段落分析器：负责文档的智能分段和相关性分析"""

    def __init__(self):
        try:
            self.nlp = spacy.load("zh_core_web_sm")
        except:
            try:
                self.nlp = spacy.load("en_core_web_sm")
            except:
                logger.warning("Spacy模型加载失败，使用基础分词")
                self.nlp = None
        
        # 初始化语义相似度模型
        try:
            self.similarity_model = pipeline("feature-extraction", model="sentence-transformers/all-MiniLM-L6-v2")
        except:
            logger.warning("语义相似度模型加载失败，使用TF-IDF")
            self.similarity_model = None
            self.vectorizer = TfidfVectorizer(max_features=1000, stop_words='english')

    def extract_chapters(self, file_path: str) -> List[Dict[str, Any]]:
        """提取章节段落"""
        try:
            content = self._read_file_content(file_path)
            chapters = []
            
            # 基于正则表达式的章节检测
            chapter_patterns = [
                r'第[一二三四五六七八九十]+章\s+(.+)',
                r'第\d+章\s+(.+)',
                r'Chapter\s+\d+\s*:?\s*(.+)',
                r'\d+\.\s+(.+)'  # 数字编号
            ]
            
            lines = content.split('\n')
            current_chapter = None
            chapter_content = []
            
            for line_num, line in enumerate(lines):
                line = line.strip()
                if not line:
                    continue
                
                # 检查是否为章节标题
                is_chapter = False
                for pattern in chapter_patterns:
                    match = re.match(pattern, line, re.IGNORECASE)
                    if match:
                        # 保存上一个章节
                        if current_chapter:
                            chapters.append({
                                "title": current_chapter["title"],
                                "content": '\n'.join(chapter_content),
                                "start_line": current_chapter["start_line"],
                                "end_line": line_num - 1,
                                "page_num": current_chapter.get("page_num", 1),
                                "type": "chapter"
                            })
                        
                        # 开始新章节
                        current_chapter = {
                            "title": match.group(1) if match.groups() else line,
                            "start_line": line_num,
                            "page_num": self._estimate_page_num(line_num)
                        }
                        chapter_content = []
                        is_chapter = True
                        break
                
                if not is_chapter and current_chapter:
                    chapter_content.append(line)
            
            # 保存最后一个章节
            if current_chapter:
                chapters.append({
                    "title": current_chapter["title"],
                    "content": '\n'.join(chapter_content),
                    "start_line": current_chapter["start_line"],
                    "end_line": len(lines) - 1,
                    "page_num": current_chapter.get("page_num", 1),
                    "type": "chapter"
                })
            
            logger.info(f"章节提取完成: {len(chapters)} 个章节")
            return chapters
            
        except Exception as e:
            logger.error(f"章节提取失败: {file_path}, error={str(e)}")
            return []

    def extract_pages(self, file_path: str) -> List[Dict[str, Any]]:
        """提取页面段落"""
        try:
            content = self._read_file_content(file_path)
            pages = []
            
            # 估算每页的行数（假设每页约50行）
            lines_per_page = 50
            lines = content.split('\n')
            total_lines = len(lines)
            total_pages = (total_lines + lines_per_page - 1) // lines_per_page
            
            for page_num in range(total_pages):
                start_line = page_num * lines_per_page
                end_line = min((page_num + 1) * lines_per_page, total_lines)
                page_content = '\n'.join(lines[start_line:end_line])
                
                if page_content.strip():
                    pages.append({
                        "title": f"第{page_num + 1}页",
                        "content": page_content,
                        "start_line": start_line,
                        "end_line": end_line - 1,
                        "page_num": page_num + 1,
                        "type": "page"
                    })
            
            logger.info(f"页面提取完成: {len(pages)} 个页面")
            return pages
            
        except Exception as e:
            logger.error(f"页面提取失败: {file_path}, error={str(e)}")
            return []

    def extract_paragraphs(self, file_path: str) -> List[Dict[str, Any]]:
        """提取自然段落"""
        try:
            content = self._read_file_content(file_path)
            paragraphs = []
            
            # 基于空行分割的自然段落
            raw_paragraphs = content.split('\n\n')
            
            for para_num, para_content in enumerate(raw_paragraphs):
                para_content = para_content.strip()
                if para_content and len(para_content) > 10:  # 过滤过短的段落
                    paragraphs.append({
                        "title": f"段落{para_num + 1}",
                        "content": para_content,
                        "paragraph_num": para_num + 1,
                        "char_count": len(para_content),
                        "word_count": len(para_content.split()),
                        "type": "paragraph"
                    })
            
            logger.info(f"段落提取完成: {len(paragraphs)} 个段落")
            return paragraphs
            
        except Exception as e:
            logger.error(f"段落提取失败: {file_path}, error={str(e)}")
            return []

    def extract_sections(self, file_path: str) -> List[Dict[str, Any]]:
        """提取结构化段落"""
        try:
            content = self._read_file_content(file_path)
            sections = []
            
            # 基于标题层级的结构化分段
            section_hierarchy = self._build_section_hierarchy(content)
            
            for section in section_hierarchy:
                sections.append({
                    "title": section["title"],
                    "content": section["content"],
                    "level": section["level"],
                    "parent": section.get("parent", ""),
                    "start_line": section["start_line"],
                    "end_line": section["end_line"],
                    "type": "section"
                })
            
            logger.info(f"结构化段落提取完成: {len(sections)} 个段落")
            return sections
            
        except Exception as e:
            logger.error(f"结构化段落提取失败: {file_path}, error={str(e)}")
            return []

    def intelligent_segment(self, file_path: str) -> List[Dict[str, Any]]:
        """智能分段（基于内容语义）"""
        try:
            content = self._read_file_content(file_path)
            
            # 使用自然语言处理进行语义分段
            if self.nlp:
                return self._semantic_segment(content)
            else:
                # 回退到基于长度的分段
                return self._length_based_segment(content)
                
        except Exception as e:
            logger.error(f"智能分段失败: {file_path}, error={str(e)}")
            return []

    def search_relevant_segments(self, query: str, segments: List[Dict[str, Any]], 
                               top_k: int = 5, similarity_threshold: float = 0.7) -> List[Dict[str, Any]]:
        """搜索相关段落"""
        try:
            if not segments:
                return []
            
            # 提取段落内容
            segment_contents = [seg.get("content", "") for seg in segments]
            
            # 计算相似度
            similarities = self._calculate_similarities(query, segment_contents)
            
            # 排序并选择top-k
            segment_scores = list(zip(segments, similarities))
            segment_scores.sort(key=lambda x: x[1], reverse=True)
            
            relevant_segments = []
            for segment, similarity in segment_scores[:top_k]:
                if similarity >= similarity_threshold:
                    segment["similarity"] = similarity
                    relevant_segments.append(segment)
            
            logger.info(f"相关段落搜索完成: query='{query}', relevant={len(relevant_segments)}")
            return relevant_segments
            
        except Exception as e:
            logger.error(f"相关段落搜索失败: {str(e)}")
            return []

    def _read_file_content(self, file_path: str) -> str:
        """读取文件内容"""
        try:
            path = Path(file_path)
            
            if path.suffix.lower() == '.pdf':
                return self._read_pdf_content(file_path)
            elif path.suffix.lower() in ['.docx', '.doc']:
                return self._read_docx_content(file_path)
            elif path.suffix.lower() in ['.txt', '.md']:
                with open(file_path, 'r', encoding='utf-8') as f:
                    return f.read()
            else:
                # 尝试作为文本文件读取
                with open(file_path, 'r', encoding='utf-8') as f:
                    return f.read()
                    
        except Exception as e:
            logger.error(f"文件读取失败: {file_path}, error={str(e)}")
            return ""

    def _read_pdf_content(self, file_path: str) -> str:
        """读取PDF内容"""
        try:
            import pdfplumber
            content = []
            
            with pdfplumber.open(file_path) as pdf:
                for page in pdf.pages:
                    text = page.extract_text()
                    if text:
                        content.append(text)
            
            return '\n'.join(content)
            
        except Exception as e:
            logger.error(f"PDF读取失败: {file_path}, error={str(e)}")
            return ""

    def _read_docx_content(self, file_path: str) -> str:
        """读取DOCX内容"""
        try:
            from docx import Document
            doc = Document(file_path)
            
            content = []
            for paragraph in doc.paragraphs:
                if paragraph.text.strip():
                    content.append(paragraph.text)
            
            return '\n'.join(content)
            
        except Exception as e:
            logger.error(f"DOCX读取失败: {file_path}, error={str(e)}")
            return ""

    def _estimate_page_num(self, line_num: int, lines_per_page: int = 50) -> int:
        """估算页码"""
        return (line_num // lines_per_page) + 1

    def _build_section_hierarchy(self, content: str) -> List[Dict[str, Any]]:
        """构建章节层级结构"""
        sections = []
        lines = content.split('\n')
        
        current_sections = [{} for _ in range(10)]  # 最多10级层级
        
        for line_num, line in enumerate(lines):
            line = line.strip()
            if not line:
                continue
            
            level = self._detect_heading_level(line)
            if level > 0:
                # 清空子层级
                for i in range(level, len(current_sections)):
                    current_sections[i] = {}
                
                # 设置当前层级
                current_sections[level - 1] = {
                    "title": line,
                    "level": level,
                    "start_line": line_num,
                    "content_lines": []
                }
                
                # 构建完整路径
                parent_path = " > ".join([
                    s.get("title", "") for s in current_sections[:level-1] 
                    if s.get("title", "")
                ])
                
                sections.append({
                    "title": line,
                    "content": "",  # 将在后续填充
                    "level": level,
                    "parent": parent_path,
                    "start_line": line_num,
                    "end_line": line_num
                })
        
        # 填充内容
        for i, section in enumerate(sections):
            start_line = section["start_line"]
            end_line = sections[i + 1]["start_line"] if i + 1 < len(sections) else len(lines)
            
            content_lines = lines[start_line:end_line]
            section["content"] = '\n'.join(content_lines)
            section["end_line"] = end_line - 1
        
        return sections

    def _detect_heading_level(self, text: str) -> int:
        """检测标题级别"""
        if not text:
            return 0
        
        # 基于格式的标题级别检测
        if re.match(r'^第[一二三四五六七八九十]+章', text):
            return 1
        elif re.match(r'^第\d+章', text):
            return 1
        elif re.match(r'^\d+\.\s+', text):
            return 2
        elif re.match(r'^\d+\.\d+\.\s+', text):
            return 3
        elif text.isupper() and len(text) < 50:
            return 2
        elif text[0].isupper() and len(text) < 80:
            return 3
        
        return 0

    def _semantic_segment(self, content: str) -> List[Dict[str, Any]]:
        """基于语义的分段"""
        # 这里可以实现更复杂的语义分段算法
        # 目前回退到基于长度的分段
        return self._length_based_segment(content)

    def _length_based_segment(self, content: str, max_length: int = 2000) -> List[Dict[str, Any]]:
        """基于长度的分段"""
        segments = []
        lines = content.split('\n')
        
        current_segment = []
        current_length = 0
        segment_num = 1
        
        for line in lines:
            line_length = len(line)
            
            if current_length + line_length > max_length and current_segment:
                # 保存当前段落
                segment_content = '\n'.join(current_segment)
                segments.append({
                    "title": f"段落{segment_num}",
                    "content": segment_content,
                    "segment_num": segment_num,
                    "char_count": len(segment_content),
                    "word_count": len(segment_content.split()),
                    "type": "intelligent"
                })
                
                # 开始新段落
                current_segment = [line] if line.strip() else []
                current_length = line_length if line.strip() else 0
                segment_num += 1
            else:
                if line.strip():
                    current_segment.append(line)
                    current_length += line_length
        
        # 保存最后一个段落
        if current_segment:
            segment_content = '\n'.join(current_segment)
            segments.append({
                "title": f"段落{segment_num}",
                "content": segment_content,
                "segment_num": segment_num,
                "char_count": len(segment_content),
                "word_count": len(segment_content.split()),
                "type": "intelligent"
            })
        
        return segments

    def _calculate_similarities(self, query: str, contents: List[str]) -> List[float]:
        """计算相似度"""
        try:
            if self.similarity_model:
                # 使用语义模型
                return self._calculate_semantic_similarity(query, contents)
            else:
                # 使用TF-IDF
                return self._calculate_tfidf_similarity(query, contents)
                
        except Exception as e:
            logger.error(f"相似度计算失败: {str(e)}")
            # 回退到简单的关键词匹配
            return self._calculate_keyword_similarity(query, contents)

    def _calculate_semantic_similarity(self, query: str, contents: List[str]) -> List[float]:
        """计算语义相似度"""
        try:
            # 获取查询的嵌入向量
            query_embedding = np.mean(self.similarity_model(query)[0], axis=0)
            
            similarities = []
            for content in contents:
                if content.strip():
                    content_embedding = np.mean(self.similarity_model(content[:512])[0], axis=0)  # 限制长度
                    similarity = cosine_similarity([query_embedding], [content_embedding])[0][0]
                    similarities.append(float(similarity))
                else:
                    similarities.append(0.0)
            
            return similarities
            
        except Exception as e:
            logger.error(f"语义相似度计算失败: {str(e)}")
            return self._calculate_tfidf_similarity(query, contents)

    def _calculate_tfidf_similarity(self, query: str, contents: List[str]) -> List[float]:
        """计算TF-IDF相似度"""
        try:
            # 构建文档-词项矩阵
            documents = [query] + contents
            tfidf_matrix = self.vectorizer.fit_transform(documents)
            
            # 计算查询与每个内容的相似度
            query_vector = tfidf_matrix[0:1]
            content_vectors = tfidf_matrix[1:]
            
            similarities = cosine_similarity(query_vector, content_vectors)[0]
            return similarities.tolist()
            
        except Exception as e:
            logger.error(f"TF-IDF相似度计算失败: {str(e)}")
            return self._calculate_keyword_similarity(query, contents)

    def _calculate_keyword_similarity(self, query: str, contents: List[str]) -> List[float]:
        """计算关键词相似度（回退方案）"""
        try:
            query_words = set(query.lower().split())
            similarities = []
            
            for content in contents:
                if content.strip():
                    content_words = set(content.lower().split())
                    intersection = query_words.intersection(content_words)
                    union = query_words.union(content_words)
                    
                    jaccard_similarity = len(intersection) / len(union) if union else 0.0
                    similarities.append(jaccard_similarity)
                else:
                    similarities.append(0.0)
            
            return similarities
            
        except Exception as e:
            logger.error(f"关键词相似度计算失败: {str(e)}")
            return [0.0] * len(contents)