import asyncio
import aiofiles
import os
from typing import List, Dict, Any, Optional, AsyncGenerator
from PyPDF2 import PdfReader
from docx import Document
from pydantic import BaseModel
from scorpio.core.common import get_logger

logger = get_logger(__name__)

class ChunkConfig(BaseModel):
    """Chunking configuration for the ingestion service.

    Controls how extracted text is split into chunks before streaming.
    """
    # Target maximum chunk length, in characters.
    chunk_size: int = 1000
    # Desired overlap, in characters, between consecutive chunks.
    chunk_overlap: int = 200
    strategy: str = "sentence"  # sentence, paragraph, fixed
    separator: str = ". "  # sentence delimiter used by the "sentence" strategy
    include_metadata: bool = True  # attach char offsets + strategy name to each chunk
    max_chunks: Optional[int] = None  # cap on emitted chunks; None means unlimited
    
class IngestionService:
    """Document ingestion service: extracts text from files and streams chunks."""

    def __init__(self, chunk_config: Optional["ChunkConfig"] = None):
        # File extensions this service knows how to extract text from.
        self.supported_formats = ['.pdf', '.docx', '.txt', '.md']
        # BUGFIX: the parameter now defaults to None; previously it was
        # required, which made the `or ChunkConfig()` fallback unreachable
        # unless the caller explicitly passed a falsy value.
        self.chunk_config = chunk_config or ChunkConfig()
        # Number of chunks produced by the most recent chunk() run.
        self.chunk_count = 0

    async def chunk(
        self,
        file_path: str,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Split the file's text using the configured strategy and stream the chunks.

        Args:
            file_path: Path to a supported document (.pdf, .docx, .txt, .md).

        Yields:
            Dicts with 'chunk_index', 'content' and, when configured,
            a 'metadata' dict carrying character offsets and the strategy name.

        Raises:
            ValueError: If the file extension or the configured chunking
                strategy is not supported.
        """
        logger.info(f"start to ingest file: {file_path}")
        # BUGFIX: validate the extension *before* running the (potentially
        # expensive) text extraction; previously extraction happened first.
        file_ext = os.path.splitext(file_path)[1].lower()
        if file_ext not in self.supported_formats:
            raise ValueError(f"不支持的文件格式: {file_ext}")

        content = await self._extract_text(file_path)
        logger.info(f"chunk config: {self.chunk_config}")

        # Pick the splitter that implements the configured strategy.
        if self.chunk_config.strategy == "sentence":
            chunks_generator = self._build_sentence_spliter(content)
        elif self.chunk_config.strategy == "paragraph":
            chunks_generator = self._build_paragraph_spliter(content)
        elif self.chunk_config.strategy == "fixed":
            chunks_generator = self._build_fixed_spliter(content)
        else:
            raise ValueError(f"不支持的分块策略: {self.chunk_config.strategy}")

        logger.info("start to output stream chunks...")
        # Stream the chunks, honoring the optional max_chunks cap.
        chunk_count = 0
        async for chunk in chunks_generator:
            # BUGFIX: compare against None explicitly so that max_chunks=0
            # means "emit nothing" instead of "no limit".
            if self.chunk_config.max_chunks is not None and chunk_count >= self.chunk_config.max_chunks:
                break
            yield chunk
            chunk_count += 1
        self.chunk_count = chunk_count
        logger.info(f"分块处理完成，共生成 {chunk_count} 个块")

    async def _extract_text(self, file_path: str) -> str:
        """Extract raw text from the file, dispatching on its extension.

        Raises:
            ValueError: If the extension is not one of the supported formats.
        """
        file_ext = os.path.splitext(file_path)[1].lower()

        if file_ext == '.pdf':
            return await self._extract_pdf_text(file_path)
        elif file_ext == '.docx':
            return await self._extract_docx_text(file_path)
        elif file_ext in ['.txt', '.md']:
            return await self._extract_text_file(file_path)
        else:
            raise ValueError(f"不支持的文件格式: {file_ext}")

    async def _extract_pdf_text(self, file_path: str) -> str:
        """Extract PDF text (one page per line), off the event loop."""

        def sync_extract() -> str:
            with open(file_path, 'rb') as file:
                reader = PdfReader(file)
                return "".join(page.extract_text() + "\n" for page in reader.pages)

        # BUGFIX: get_event_loop() inside a coroutine is deprecated;
        # get_running_loop() is the supported call. PyPDF2 parsing is
        # blocking, so run it in the default executor.
        return await asyncio.get_running_loop().run_in_executor(None, sync_extract)

    async def _extract_docx_text(self, file_path: str) -> str:
        """Extract DOCX text (one paragraph per line), off the event loop."""

        def sync_extract() -> str:
            doc = Document(file_path)
            return "".join(paragraph.text + "\n" for paragraph in doc.paragraphs)

        # BUGFIX: get_running_loop() instead of the deprecated get_event_loop().
        return await asyncio.get_running_loop().run_in_executor(None, sync_extract)

    async def _extract_text_file(self, file_path: str) -> str:
        """Read a UTF-8 text/markdown file asynchronously."""
        async with aiofiles.open(file_path, 'r', encoding='utf-8') as file:
            return await file.read()

    async def _build_sentence_spliter(
        self,
        text: str,
        chunk_config: Optional["ChunkConfig"] = None,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Yield chunks built by greedily packing separator-delimited sentences.

        Sentences are appended until the next one would push the chunk past
        chunk_size. Character offsets in the metadata are approximate
        (whitespace stripping is not accounted for).

        Args:
            text: The text to split.
            chunk_config: Optional config override; defaults to
                self.chunk_config. (BUGFIX: _split_text used to pass a config
                positionally to a signature that did not accept one, raising
                TypeError.)
        """
        chunk_config = chunk_config or self.chunk_config
        sentences = text.split(chunk_config.separator)

        current_chunk = ""
        chunk_index = 0
        start = 0

        def make_chunk() -> Dict[str, Any]:
            # Build the output record for the chunk accumulated so far.
            data: Dict[str, Any] = {
                'chunk_index': chunk_index,
                'content': current_chunk.strip(),
            }
            if chunk_config.include_metadata:
                data['metadata'] = {
                    'start_char': start,
                    'end_char': start + len(current_chunk),
                    'strategy': 'sentence'
                }
            return data

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            if len(current_chunk) + len(sentence) <= chunk_config.chunk_size:
                # Still fits: keep packing into the current chunk.
                current_chunk += sentence + chunk_config.separator
            else:
                if current_chunk:
                    yield make_chunk()
                    chunk_index += 1
                    # BUGFIX: no overlap text is actually carried over, so
                    # advance by the full chunk length; subtracting
                    # chunk_overlap made the reported offsets drift backwards.
                    start += len(current_chunk)
                # Start a new chunk with the sentence that did not fit.
                current_chunk = sentence + chunk_config.separator

        # Flush the trailing chunk.
        if current_chunk:
            yield make_chunk()

    async def _build_paragraph_spliter(
        self,
        text: str,
        chunk_config: Optional["ChunkConfig"] = None,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Yield chunks built by greedily packing blank-line-separated paragraphs.

        Same packing scheme as the sentence splitter, with double newlines as
        the unit separator. Offsets in metadata are approximate.

        Args:
            text: The text to split.
            chunk_config: Optional config override; defaults to self.chunk_config.
        """
        chunk_config = chunk_config or self.chunk_config
        paragraphs = text.split('\n\n')

        current_chunk = ""
        chunk_index = 0
        start = 0

        def make_chunk() -> Dict[str, Any]:
            # Build the output record for the chunk accumulated so far.
            data: Dict[str, Any] = {
                'chunk_index': chunk_index,
                'content': current_chunk.strip(),
            }
            if chunk_config.include_metadata:
                data['metadata'] = {
                    'start_char': start,
                    'end_char': start + len(current_chunk),
                    'strategy': 'paragraph'
                }
            return data

        for paragraph in paragraphs:
            paragraph = paragraph.strip()
            if not paragraph:
                continue

            if len(current_chunk) + len(paragraph) <= chunk_config.chunk_size:
                # Still fits: keep packing into the current chunk.
                current_chunk += paragraph + "\n\n"
            else:
                if current_chunk:
                    yield make_chunk()
                    chunk_index += 1
                    # BUGFIX: advance by the full chunk length — no overlap
                    # text is carried over (see sentence splitter).
                    start += len(current_chunk)
                # Start a new chunk with the paragraph that did not fit.
                current_chunk = paragraph + "\n\n"

        # Flush the trailing chunk.
        if current_chunk:
            yield make_chunk()

    async def _build_fixed_spliter(
        self,
        text: str,
        chunk_config: Optional["ChunkConfig"] = None,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Yield fixed-size chunks with overlap, breaking at word boundaries.

        Args:
            text: The text to split. Empty input yields nothing.
            chunk_config: Optional config override; defaults to self.chunk_config.
        """
        chunk_config = chunk_config or self.chunk_config
        boundary_chars = (' ', '.', ',', ';', '!', '?', '\n')

        chunk_index = 0
        start = 0

        while start < len(text):
            end = min(start + chunk_config.chunk_size, len(text))

            # Avoid splitting mid-word: back up to the nearest boundary char.
            if end < len(text):
                while end > start and text[end] not in boundary_chars:
                    end -= 1
                # No boundary inside the window: fall back to a hard split.
                if end == start:
                    end = start + chunk_config.chunk_size

            chunk_content = text[start:end].strip()
            if chunk_content:
                chunk_data: Dict[str, Any] = {
                    'chunk_index': chunk_index,
                    'content': chunk_content,
                }
                if chunk_config.include_metadata:
                    chunk_data['metadata'] = {
                        'start_char': start,
                        'end_char': end,
                        'strategy': 'fixed'
                    }
                yield chunk_data
                chunk_index += 1

            # BUGFIX: the original looped forever on the final chunk —
            # `start = end - chunk_overlap` always lands before len(text),
            # so the last chunk was re-yielded indefinitely. Stop once the
            # window reaches the end of the text.
            if end >= len(text):
                break
            # BUGFIX: guarantee forward progress even when chunk_overlap is
            # as large as the distance just advanced.
            next_start = end - chunk_config.chunk_overlap
            start = next_start if next_start > start else end

    async def _split_text(
        self,
        text: str,
        chunk_size: int = 1000,
        chunk_overlap: int = 200
    ) -> List[Dict[str, Any]]:
        """Split text into sentence chunks (backward-compatible helper).

        BUGFIX: the original passed the config positionally to
        _build_sentence_spliter, whose signature did not accept one, so this
        method always raised TypeError; the splitter now takes an optional
        config argument.
        """
        chunk_config = ChunkConfig(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap
        )
        return [chunk async for chunk in self._build_sentence_spliter(text, chunk_config)]