"""
文档切片服务
"""
import os
import re
import json
import uuid
from typing import List, Dict, Any, Optional
from datetime import datetime
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from collections import Counter
import math

from core.config import get_settings
from core.logger import get_task_logger

settings = get_settings()


class SliceService:
    """Document slicing service.

    Splits the text produced by a parse task into chunks using one of three
    strategies ("semantic", "paragraph", "fixed") and persists the chunks
    plus a metadata summary as JSON under ``settings.parsed_results_dir``.
    """

    def __init__(self):
        self._setup_nltk()

    def _setup_nltk(self):
        """Ensure the NLTK tokenizer/stopword data is available.

        Best effort: a download failure is reported to stdout but never
        raised, so strategies that do not need NLTK data keep working.
        """
        try:
            # Probe for the required NLTK corpora before downloading.
            nltk.data.find('tokenizers/punkt')
            nltk.data.find('corpora/stopwords')
        except LookupError:
            try:
                nltk.download('punkt', quiet=True)
                # NLTK >= 3.8.2 resolves sent_tokenize via the 'punkt_tab'
                # resource instead of 'punkt'; fetch both for compatibility.
                nltk.download('punkt_tab', quiet=True)
                nltk.download('stopwords', quiet=True)
            except Exception as e:
                print(f"Warning: Failed to download NLTK data: {e}")

    async def slice_document(
        self,
        parse_task_id: int,
        options: Dict[str, Any],
        slice_id: str,
        task_logger,
        db
    ) -> Dict[str, Any]:
        """Slice the text of a finished parse task into chunks.

        Args:
            parse_task_id: Primary key of the ParseTask whose text is sliced.
            options: Slicing options; recognises "strategy" (default
                "semantic"), "chunk_size" (default 1000 chars), "overlap"
                (default 200) and "min_chunk_size" (default 100).
            slice_id: Unique id used for the output directory and chunk ids.
            task_logger: Structured task logger exposing info()/error().
            db: Database session used to load the parse task and results.

        Returns:
            Dict with "chunks" (list of processed chunk dicts) and
            "result_path" (directory the JSON artifacts were written to).

        Raises:
            ValueError: If the parse task or its text content is missing, or
                the requested strategy is unknown.
        """
        try:
            task_logger.info("Starting document slicing", {
                "parse_task_id": parse_task_id,
                "options": options
            })

            # Imported lazily to avoid a circular import at module load time.
            from models.database import ParseResult, ParseTask

            parse_task = db.query(ParseTask).filter(ParseTask.id == parse_task_id).first()
            if not parse_task:
                raise ValueError("Parse task not found")

            # Load the text to slice (plain text, or stripped Markdown).
            text_content = await self._get_text_content(parse_task_id, db)
            if not text_content:
                raise ValueError("No text content found for slicing")

            # Per-slice output directory for the JSON artifacts.
            result_dir = os.path.join(settings.parsed_results_dir, f"{slice_id}_slices")
            os.makedirs(result_dir, exist_ok=True)

            strategy = options.get("strategy", "semantic")
            chunk_size = options.get("chunk_size", 1000)
            overlap = options.get("overlap", 200)
            min_chunk_size = options.get("min_chunk_size", 100)

            # Dispatch table instead of an if/elif chain; unknown strategies
            # raise the same ValueError as before.
            slicers = {
                "semantic": self._semantic_slice,
                "paragraph": self._paragraph_slice,
                "fixed": self._fixed_slice,
            }
            slicer = slicers.get(strategy)
            if slicer is None:
                raise ValueError(f"Unknown slicing strategy: {strategy}")

            chunks = await slicer(
                text_content, chunk_size, overlap, min_chunk_size, task_logger
            )

            # Attach ids, counts, position info and metadata to each chunk.
            processed_chunks = []
            for i, chunk in enumerate(chunks):
                chunk_id = f"{slice_id}_chunk_{i+1:03d}"
                word_count = len(word_tokenize(chunk))
                position_info = await self._extract_position_info(chunk, text_content)

                metadata = {
                    "chunk_id": chunk_id,
                    "strategy": strategy,
                    "chunk_size": chunk_size,
                    "overlap": overlap,
                    "word_count": word_count,
                    "char_count": len(chunk),
                    "created_at": datetime.now().isoformat(),
                    "source_task_id": parse_task_id,
                    "position_info": position_info
                }

                processed_chunks.append({
                    "chunk_id": chunk_id,
                    "text_content": chunk,
                    "word_count": word_count,
                    # NOTE(review): _extract_position_info never sets
                    # "page_number", so this is currently always None.
                    "page_number": position_info.get("page_number"),
                    "position_info": position_info,
                    "metadata": metadata
                })

            # Persist the chunks themselves.
            chunks_path = os.path.join(result_dir, "chunks.json")
            with open(chunks_path, 'w', encoding='utf-8') as f:
                json.dump(processed_chunks, f, ensure_ascii=False, indent=2)

            # Persist a slice-level summary next to them.
            slice_metadata = {
                "slice_id": slice_id,
                "parse_task_id": parse_task_id,
                "strategy": strategy,
                "chunk_size": chunk_size,
                "overlap": overlap,
                "min_chunk_size": min_chunk_size,
                "total_chunks": len(processed_chunks),
                "total_words": sum(c["word_count"] for c in processed_chunks),
                "created_at": datetime.now().isoformat(),
                "source_file": parse_task.filename
            }

            metadata_path = os.path.join(result_dir, "metadata.json")
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(slice_metadata, f, ensure_ascii=False, indent=2)

            task_logger.info("Document slicing completed", {
                "total_chunks": len(processed_chunks),
                "total_words": slice_metadata["total_words"]
            })

            return {
                "chunks": processed_chunks,
                "result_path": result_dir
            }

        except Exception as e:
            task_logger.error("Document slicing failed", error=e)
            raise

    async def _get_text_content(self, parse_task_id: int, db) -> Optional[str]:
        """Load the parsed text for a task, falling back to stripped Markdown.

        Returns None when no usable content exists or reading fails; the
        caller turns None into a ValueError.
        """
        try:
            from models.database import ParseResult

            # Prefer the plain-text parse result.
            text_result = db.query(ParseResult).filter(
                ParseResult.task_id == parse_task_id,
                ParseResult.content_type == "text"
            ).first()

            if text_result and os.path.exists(text_result.content_path):
                with open(text_result.content_path, 'r', encoding='utf-8') as f:
                    return f.read()

            # Fall back to the Markdown result with basic markup removed.
            markdown_result = db.query(ParseResult).filter(
                ParseResult.task_id == parse_task_id,
                ParseResult.content_type == "markdown"
            ).first()

            if markdown_result and os.path.exists(markdown_result.content_path):
                with open(markdown_result.content_path, 'r', encoding='utf-8') as f:
                    markdown_content = f.read()
                    return re.sub(r'[#*`\[\]]', '', markdown_content)

            return None

        except Exception:
            # Best effort by design: any read/query failure is treated the
            # same as "no content available".
            return None

    async def _semantic_slice(
        self,
        text: str,
        chunk_size: int,
        overlap: int,
        min_chunk_size: int,
        task_logger
    ) -> List[str]:
        """Slice text along paragraph boundaries, packing up to chunk_size.

        Paragraphs are accumulated until the next one would exceed
        chunk_size; over-long paragraphs are split via
        _split_long_paragraph. Accumulated text shorter than min_chunk_size
        is dropped when flushed. Falls back to fixed slicing on error.
        """
        try:
            task_logger.info("Performing semantic slicing", {
                "chunk_size": chunk_size,
                "overlap": overlap
            })

            paragraphs = self._split_into_paragraphs(text)

            chunks = []
            current_chunk = ""

            for paragraph in paragraphs:
                if len(current_chunk) + len(paragraph) <= chunk_size:
                    # Paragraph still fits in the current window.
                    current_chunk += paragraph + "\n"
                else:
                    # Flush the current window (if it meets the minimum).
                    if current_chunk and len(current_chunk) >= min_chunk_size:
                        chunks.append(current_chunk.strip())

                    if len(paragraph) <= chunk_size:
                        current_chunk = paragraph + "\n"
                    else:
                        # Paragraph alone exceeds the window: split it.
                        sub_chunks = self._split_long_paragraph(
                            paragraph, chunk_size, overlap, min_chunk_size
                        )
                        chunks.extend(sub_chunks)
                        current_chunk = ""

            # Flush the trailing window.
            if current_chunk and len(current_chunk) >= min_chunk_size:
                chunks.append(current_chunk.strip())

            task_logger.info("Semantic slicing completed", {
                "original_paragraphs": len(paragraphs),
                "final_chunks": len(chunks)
            })

            return chunks

        except Exception as e:
            task_logger.error("Semantic slicing failed", error=e)
            # Fall back to fixed-size slicing.
            return await self._fixed_slice(text, chunk_size, overlap, min_chunk_size, task_logger)

    def _split_into_paragraphs(self, text: str) -> List[str]:
        """Split text into non-empty, stripped paragraphs on blank lines."""
        paragraphs = re.split(r'\n\s*\n', text)
        return [p.strip() for p in paragraphs if p.strip()]

    def _split_long_paragraph(
        self,
        paragraph: str,
        chunk_size: int,
        overlap: int,
        min_chunk_size: int
    ) -> List[str]:
        """Split an over-long paragraph by sentence, then by word if needed.

        When overlap > 0 and multiple chunks result, each chunk after the
        first is prefixed with the last overlap//2 words of its predecessor
        (a word-count approximation of the character-based overlap).
        """
        chunks = []

        # First try sentence boundaries.
        sentences = sent_tokenize(paragraph)

        current_chunk = ""
        for sentence in sentences:
            if len(current_chunk) + len(sentence) <= chunk_size:
                current_chunk += sentence + " "
            else:
                if current_chunk and len(current_chunk) >= min_chunk_size:
                    chunks.append(current_chunk.strip())

                if len(sentence) > chunk_size:
                    # Sentence alone exceeds the window: fall back to words.
                    words = word_tokenize(sentence)
                    temp_chunk = ""

                    for word in words:
                        if len(temp_chunk) + len(word) + 1 <= chunk_size:
                            temp_chunk += word + " "
                        else:
                            if temp_chunk and len(temp_chunk) >= min_chunk_size:
                                chunks.append(temp_chunk.strip())
                            temp_chunk = word + " "

                    if temp_chunk and len(temp_chunk) >= min_chunk_size:
                        chunks.append(temp_chunk.strip())

                    current_chunk = ""
                else:
                    current_chunk = sentence + " "

        if current_chunk and len(current_chunk) >= min_chunk_size:
            chunks.append(current_chunk.strip())

        # Prefix each chunk (after the first) with words from its predecessor.
        if overlap > 0 and len(chunks) > 1:
            chunks_with_overlap = []
            for i, chunk in enumerate(chunks):
                if i == 0:
                    chunks_with_overlap.append(chunk)
                else:
                    prev_chunk = chunks[i-1]
                    words = word_tokenize(prev_chunk)
                    overlap_words = words[-overlap//2:] if len(words) > overlap//2 else words

                    overlap_text = " ".join(overlap_words)
                    chunks_with_overlap.append(overlap_text + " " + chunk)

            return chunks_with_overlap

        return chunks

    async def _paragraph_slice(
        self,
        text: str,
        chunk_size: int,
        overlap: int,
        min_chunk_size: int,
        task_logger
    ) -> List[str]:
        """One chunk per paragraph; over-long paragraphs are subdivided.

        Falls back to fixed slicing on error.
        """
        try:
            task_logger.info("Performing paragraph slicing", {
                "chunk_size": chunk_size,
                "overlap": overlap
            })

            paragraphs = self._split_into_paragraphs(text)
            chunks = []

            for paragraph in paragraphs:
                if len(paragraph) <= chunk_size:
                    chunks.append(paragraph)
                else:
                    # Over-long paragraph: split by sentence/word.
                    sub_chunks = self._split_long_paragraph(
                        paragraph, chunk_size, overlap, min_chunk_size
                    )
                    chunks.extend(sub_chunks)

            task_logger.info("Paragraph slicing completed", {
                "original_paragraphs": len(paragraphs),
                "final_chunks": len(chunks)
            })

            return chunks

        except Exception as e:
            task_logger.error("Paragraph slicing failed", error=e)
            return await self._fixed_slice(text, chunk_size, overlap, min_chunk_size, task_logger)

    async def _fixed_slice(
        self,
        text: str,
        chunk_size: int,
        overlap: int,
        min_chunk_size: int,
        task_logger
    ) -> List[str]:
        """Fixed-size sliding-window slicing over raw characters.

        Emits chunk_size-character windows that advance by
        chunk_size - overlap characters. Windows shorter than min_chunk_size
        are skipped, except the final window, which is always kept.
        """
        try:
            task_logger.info("Performing fixed size slicing", {
                "chunk_size": chunk_size,
                "overlap": overlap
            })

            # Clamp overlap into [0, chunk_size - 1] so the window always
            # advances; an overlap >= chunk_size would step backwards and
            # loop forever.
            effective_overlap = max(0, min(overlap, chunk_size - 1))

            chunks = []
            start = 0
            text_len = len(text)

            while start < text_len:
                end = min(start + chunk_size, text_len)
                chunk = text[start:end]

                # Keep the chunk if it meets the minimum, or it is the tail.
                if len(chunk) >= min_chunk_size or end == text_len:
                    chunks.append(chunk)

                if end == text_len:
                    # Reached the end of the text. Without this break a
                    # positive overlap would rewind `start` below text_len
                    # and re-emit the tail chunk forever.
                    break

                start = end - effective_overlap if effective_overlap > 0 else end

            task_logger.info("Fixed size slicing completed", {
                "total_chunks": len(chunks)
            })

            return chunks

        except Exception as e:
            task_logger.error("Fixed size slicing failed", error=e)
            # Last-resort fallback; max(1, ...) avoids a zero/negative range
            # step when chunk_size <= overlap.
            step = max(1, chunk_size - overlap)
            return [text[i:i+chunk_size] for i in range(0, len(text), step)]

    async def _extract_position_info(self, chunk: str, full_text: str) -> Dict[str, Any]:
        """Locate a chunk inside the full text by exact substring match.

        Returns absolute and relative character offsets, or
        {"position": "unknown"} when the chunk is not found verbatim (e.g.
        because stripping or overlap prefixes altered it).
        """
        try:
            chunk_start = full_text.find(chunk)

            if chunk_start == -1:
                return {"position": "unknown"}

            chunk_end = chunk_start + len(chunk)

            # Relative positions in [0, 1]; guard against empty full_text.
            relative_start = chunk_start / len(full_text) if len(full_text) > 0 else 0
            relative_end = chunk_end / len(full_text) if len(full_text) > 0 else 0

            return {
                "start_char": chunk_start,
                "end_char": chunk_end,
                "relative_start": relative_start,
                "relative_end": relative_end,
                "char_count": len(chunk)
            }

        except Exception as e:
            return {"position": "unknown", "error": str(e)}

    async def get_slice_statistics(self, slice_id: str, db) -> Dict[str, Any]:
        """Aggregate word/character statistics for a stored slice task.

        Returns {} when the slice task or its chunks are missing, and
        {"error": ...} on unexpected failures.
        """
        try:
            from models.database import SliceTask, SliceChunk

            slice_task = db.query(SliceTask).filter(SliceTask.slice_id == slice_id).first()
            if not slice_task:
                return {}

            chunks = db.query(SliceChunk).filter(SliceChunk.slice_task_id == slice_task.id).all()

            if not chunks:
                return {}

            word_counts = [chunk.word_count for chunk in chunks]
            char_counts = [len(chunk.text_content) for chunk in chunks]

            return {
                "total_chunks": len(chunks),
                "total_words": sum(word_counts),
                "total_characters": sum(char_counts),
                "avg_words_per_chunk": sum(word_counts) / len(word_counts),
                "avg_characters_per_chunk": sum(char_counts) / len(char_counts),
                "min_words": min(word_counts),
                "max_words": max(word_counts),
                "min_characters": min(char_counts),
                "max_characters": max(char_counts),
                "strategy": slice_task.strategy,
                "chunk_size": slice_task.chunk_size,
                "overlap": slice_task.overlap
            }

        except Exception as e:
            return {"error": str(e)}