"""
基于Ollama AI模型的智能文档分块处理器
使用AI模型进行语义感知的文档分块
"""
import json
import re
from typing import List, Dict, Any, Optional
import requests
from ..core.config import config


class AIDocumentChunker:
    """AI-driven document chunker.

    Prompts an Ollama-hosted model to split text at semantically meaningful
    boundaries. Falls back to a traditional size/overlap splitter when the
    text is too long for the model, the API call fails, or the model's
    response cannot be parsed.
    """

    def __init__(self, model_name: str = "deepseek-r1:1.5b",
                 ollama_url: str = "http://localhost:11434"):
        """
        Args:
            model_name: Name of the Ollama model used for chunking.
            ollama_url: Base URL of the Ollama HTTP API.
        """
        self.model_name = model_name
        self.ollama_url = ollama_url
        # Target chunk size / overlap come from project config with defaults.
        self.chunk_size = config.get('documents.chunk_size', 1000)
        self.chunk_overlap = config.get('documents.chunk_overlap', 200)

    def chunk_with_ai(self, text: str, strategy: str = "semantic") -> List[str]:
        """Chunk *text* using the AI model.

        Args:
            text: The document text to split.
            strategy: One of "semantic", "topic", "hybrid"; any other value
                falls back to "semantic".

        Returns:
            A list of chunk strings (possibly a single-element list for
            short input, or [] for empty input). Never raises: any AI
            failure falls back to traditional chunking.
        """
        # Very short text is returned as a single chunk; empty text -> [].
        if not text or len(text) < 100:
            return [text] if text else []

        # Long prompts risk model timeouts, so skip the AI entirely.
        if len(text) > 5000:
            print(f"文本过长({len(text)}字符)，使用传统分块")
            return self._fallback_chunking(text)

        try:
            if strategy == "semantic":
                return self._semantic_ai_chunking(text)
            elif strategy == "topic":
                return self._topic_ai_chunking(text)
            elif strategy == "hybrid":
                return self._hybrid_ai_chunking(text)
            else:
                # Unknown strategy: default to semantic chunking.
                return self._semantic_ai_chunking(text)
        except Exception as e:
            print(f"AI分块失败，回退到传统分块: {e}")
            return self._fallback_chunking(text)

    def _semantic_ai_chunking(self, text: str) -> List[str]:
        """Ask the model to split *text* into semantically complete chunks.

        Returns the parsed chunk list; raises if the Ollama call fails.
        """
        # NOTE: the prompt text is model-facing runtime data; do not edit.
        prompt = f"""
请将以下文本按照语义完整性进行分块。要求：
1. 每个块应该是一个完整的语义单元
2. 块的大小控制在{self.chunk_size}字符左右
3. 相邻块之间可以有{self.chunk_overlap}字符的重叠
4. 优先在段落、句子或逻辑边界处分割
5. 保持内容的连贯性和完整性

文本内容：
{text}

请以JSON格式返回分块结果，格式如下：
{{
    "chunks": [
        "第一个语义块的内容",
        "第二个语义块的内容",
        ...
    ],
    "reasoning": "分块理由说明"
}}
"""

        response = self._call_ollama(prompt)
        return self._parse_ai_response(response)

    def _topic_ai_chunking(self, text: str) -> List[str]:
        """Ask the model to split *text* into topic-oriented chunks.

        Each returned chunk is prefixed with its topic label by
        :meth:`_parse_topic_response`. Raises if the Ollama call fails.
        """
        prompt = f"""
请将以下文本按照主题进行分块。要求：
1. 每个块应该围绕一个明确的主题
2. 块的大小控制在{self.chunk_size}字符左右
3. 相邻块之间可以有{self.chunk_overlap}字符的重叠
4. 识别主题转换点进行分割
5. 为每个块标注主题

文本内容：
{text}

请以JSON格式返回分块结果，格式如下：
{{
    "chunks": [
        {{
            "content": "块内容",
            "topic": "主题描述"
        }},
        ...
    ],
    "reasoning": "分块理由说明"
}}
"""

        response = self._call_ollama(prompt)
        return self._parse_topic_response(response)

    def _hybrid_ai_chunking(self, text: str) -> List[str]:
        """Hybrid strategy: semantic AI chunking, then re-split oversized chunks.

        Chunks exceeding 1.5x the target size are split again with the
        traditional splitter so no chunk stays far over budget.
        """
        semantic_chunks = self._semantic_ai_chunking(text)

        final_chunks: List[str] = []
        for chunk in semantic_chunks:
            if len(chunk) > self.chunk_size * 1.5:
                final_chunks.extend(self._fallback_chunking(chunk))
            else:
                final_chunks.append(chunk)

        return final_chunks

    def _call_ollama(self, prompt: str) -> str:
        """Send *prompt* to the Ollama /api/generate endpoint.

        Returns:
            The model's raw response text ("" if the field is missing).

        Raises:
            Exception: On HTTP errors, timeouts, or connection failures
                (wrapped with a descriptive message; original cause chained).
        """
        try:
            payload = {
                "model": self.model_name,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "temperature": 0.1,  # low temperature for consistent output
                    "top_p": 0.9,
                    "num_predict": 1024  # cap generation length for speed
                }
            }

            response = requests.post(
                f"{self.ollama_url}/api/generate",
                json=payload,
                timeout=30  # short timeout; caller falls back on failure
            )

            if response.status_code == 200:
                return response.json().get("response", "")
            else:
                raise Exception(f"Ollama API错误: {response.status_code}")

        except Exception as e:
            # Chain the original cause so tracebacks stay diagnosable.
            raise Exception(f"调用Ollama失败: {e}") from e

    def _parse_ai_response(self, response: str) -> List[str]:
        """Parse the model response into a list of chunk strings.

        Tries JSON first; on failure falls back to a line-based heuristic;
        as a last resort returns the whole response as one chunk.
        """
        try:
            # Extract the outermost JSON object from the response, which may
            # be surrounded by model chatter.
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                data = json.loads(json_match.group())
                chunks = data.get("chunks", [])
                if chunks and isinstance(chunks, list):
                    # Only keep string entries: a malformed entry (e.g. a
                    # dict) must not abort parsing of the valid ones.
                    cleaned = [chunk.strip() for chunk in chunks
                               if isinstance(chunk, str) and chunk.strip()]
                    if cleaned:
                        return cleaned

            # JSON parsing failed: heuristically split the raw text by lines.
            lines = response.strip().split('\n')
            chunks = []
            current_chunk = ""

            for line in lines:
                line = line.strip()
                if not line or line.startswith('{') or line.startswith('}'):
                    continue

                if line.startswith('"') and line.endswith('",'):
                    # Looks like a JSON array element; take the quoted body.
                    content = line[1:-2]
                    if content:
                        chunks.append(content)
                else:
                    # Accumulate free text until the target size is reached.
                    current_chunk += line + " "
                    if len(current_chunk) > self.chunk_size:
                        chunks.append(current_chunk.strip())
                        current_chunk = ""

            if current_chunk.strip():
                chunks.append(current_chunk.strip())

            return chunks if chunks else [response.strip()]

        except Exception as e:
            print(f"解析AI响应失败: {e}")
            return [response.strip()]

    def _parse_topic_response(self, response: str) -> List[str]:
        """Parse a topic-chunking response.

        Dict entries become "[主题: <topic>] <content>" strings; plain string
        entries pass through. Falls back to :meth:`_parse_ai_response` when
        no usable JSON is found or parsing raises.
        """
        try:
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                data = json.loads(json_match.group())
                chunks_data = data.get("chunks", [])
                if chunks_data and isinstance(chunks_data, list):
                    chunks = []
                    for chunk_data in chunks_data:
                        if isinstance(chunk_data, dict):
                            content = chunk_data.get("content", "")
                            topic = chunk_data.get("topic", "")
                            if content:
                                chunks.append(f"[主题: {topic}] {content}")
                        elif isinstance(chunk_data, str):
                            chunks.append(chunk_data)
                    return [chunk.strip() for chunk in chunks if chunk.strip()]

            return self._parse_ai_response(response)

        except Exception as e:
            print(f"解析主题响应失败: {e}")
            return self._parse_ai_response(response)

    def _fallback_chunking(self, text: str) -> List[str]:
        """Traditional size/overlap chunking used as a fallback.

        Splits at the nearest '。' or newline before the size limit when
        possible; consecutive chunks overlap by up to ``chunk_overlap``
        characters.
        """
        if len(text) <= self.chunk_size:
            return [text] if text else []

        chunks = []
        start = 0

        while start < len(text):
            end = start + self.chunk_size

            if end < len(text):
                # Prefer splitting at the last sentence or line boundary
                # inside the window.
                period_pos = text.rfind('。', start, end)
                newline_pos = text.rfind('\n', start, end)

                if period_pos > start and period_pos > newline_pos:
                    end = period_pos + 1
                elif newline_pos > start:
                    end = newline_pos + 1

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # Step back by the overlap, but guarantee forward progress: when
            # a boundary cut the chunk shorter than chunk_overlap, blindly
            # using `end - chunk_overlap` would move `start` backwards (even
            # negative), silently skipping or re-reading input text.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def get_chunk_analysis(self, text: str, strategy: str = "semantic") -> Dict[str, Any]:
        """Chunk *text* and return summary statistics alongside the chunks.

        Returns:
            A dict with chunk_count, avg/min/max chunk sizes, the strategy
            and model names, and the chunk list itself. All sizes are 0 and
            chunks is [] when nothing was produced.
        """
        chunks = self.chunk_with_ai(text, strategy)

        if not chunks:
            return {
                'chunk_count': 0,
                'avg_chunk_size': 0,
                'min_chunk_size': 0,
                'max_chunk_size': 0,
                'strategy': strategy,
                'model': self.model_name,
                'chunks': []
            }

        chunk_sizes = [len(chunk) for chunk in chunks]

        return {
            'chunk_count': len(chunks),
            'avg_chunk_size': sum(chunk_sizes) / len(chunk_sizes),
            'min_chunk_size': min(chunk_sizes),
            'max_chunk_size': max(chunk_sizes),
            'strategy': strategy,
            'model': self.model_name,
            'chunks': chunks
        }


# Usage demo: run each chunking strategy over a sample passage.
if __name__ == "__main__":
    sample_text = """
    人工智能（Artificial Intelligence，AI）是计算机科学的一个分支，它企图了解智能的实质，
    并生产出一种新的能以人类智能相似的方式做出反应的智能机器。该领域的研究包括机器人、
    语言识别、图像识别、自然语言处理和专家系统等。
    
    机器学习是人工智能的一个重要分支，它使计算机能够在没有明确编程的情况下学习和改进。
    通过分析大量数据，机器学习算法可以识别模式并做出预测或决策。
    
    深度学习是机器学习的一个子集，它使用多层神经网络来模拟人脑的工作方式。
    这种方法在图像识别、语音识别和自然语言处理等领域取得了突破性进展。
    """

    demo = AIDocumentChunker(model_name="deepseek-r1:1.5b")

    # Exercise every supported strategy and print a summary for each.
    for mode in ("semantic", "topic", "hybrid"):
        print(f"\n🔍 {mode.upper()} 分块策略:")
        print("=" * 50)

        report = demo.get_chunk_analysis(sample_text, mode)

        print(f"模型: {report['model']}")
        print(f"块数: {report['chunk_count']}")
        print(f"平均大小: {report['avg_chunk_size']:.1f}")
        print(f"大小范围: {report['min_chunk_size']} - {report['max_chunk_size']}")

        print("\n分块结果:")
        for index, piece in enumerate(report['chunks'], 1):
            print(f"\n块 {index} ({len(piece)} 字符):")
            print(f"  {piece[:100]}{'...' if len(piece) > 100 else ''}")