"""RAG 知识库服务"""
import os
import json
import logging
from typing import List, Dict, Any, Optional
from pathlib import Path
import numpy as np
from sqlalchemy.orm import Session

from app import models
from app.config import settings

logger = logging.getLogger(__name__)


class RAGService:
    """RAG knowledge-base service.

    Handles the retrieval-augmented-generation pipeline for documents:
    text extraction, chunking, embedding (via the Qwen OpenAI-compatible
    API) and cosine-similarity search over stored chunks.
    """

    def __init__(self):
        self.chunk_size = 500          # default chunk size, in characters
        self.chunk_overlap = 50        # default overlap between adjacent chunks
        self._embedding_client = None  # lazily-created OpenAI-compatible client

    def extract_text_from_file(self, file_path: str) -> str:
        """Extract plain text from a .txt / .md / .pdf / .docx file.

        Raises:
            ValueError: unsupported extension or parser failure.
        """
        file_ext = Path(file_path).suffix.lower()

        try:
            if file_ext in ('.txt', '.md'):
                # Plain text and markdown are read verbatim
                with open(file_path, 'r', encoding='utf-8') as f:
                    return f.read()

            elif file_ext == '.pdf':
                try:
                    from pypdf import PdfReader
                    reader = PdfReader(file_path)
                    parts = []
                    for page in reader.pages:
                        # extract_text() may return None for image-only pages;
                        # coalesce to "" so concatenation cannot TypeError.
                        parts.append((page.extract_text() or "") + "\n")
                    return "".join(parts)
                except Exception as e:
                    logger.error(f"PDF 解析失败: {e}")
                    raise ValueError(f"PDF 解析失败: {str(e)}")

            elif file_ext in ('.docx', '.doc'):
                # NOTE(review): python-docx only reads .docx; a legacy .doc
                # file will fail here and surface as the ValueError below.
                try:
                    from docx import Document
                    doc = Document(file_path)
                    parts = [para.text + "\n" for para in doc.paragraphs]
                    return "".join(parts)
                except Exception as e:
                    logger.error(f"DOCX 解析失败: {e}")
                    raise ValueError(f"DOCX 解析失败: {str(e)}")

            else:
                raise ValueError(f"不支持的文件类型: {file_ext}")

        except Exception as e:
            logger.error(f"文件解析失败 {file_path}: {e}")
            raise

    def split_text(self, text: str, chunk_size: int = None, chunk_overlap: int = None) -> List[str]:
        """Split text into (possibly overlapping) chunks.

        Falsy sizes fall back to the instance defaults. The advance step is
        clamped to >= 1, so chunk_overlap >= chunk_size can no longer cause
        an infinite loop.

        Returns:
            List of non-empty, stripped chunk strings (empty list for empty text).
        """
        chunk_size = chunk_size or self.chunk_size
        chunk_overlap = chunk_overlap or self.chunk_overlap

        if not text:
            return []

        # Guard: a non-positive step would make the scan spin forever
        step = max(1, chunk_size - chunk_overlap)

        chunks = []
        for start in range(0, len(text), step):
            piece = text[start:start + chunk_size].strip()
            if piece:
                chunks.append(piece)

        return chunks

    def get_embedding(self, text: str, model_name: str = "text-embedding-v3") -> List[float]:
        """Return the embedding vector for *text*.

        Uses Qwen's DashScope endpoint through the OpenAI-compatible client;
        the client is created once and reused across calls.

        Raises:
            Exception: re-raised from the underlying API call after logging.
        """
        try:
            if self._embedding_client is None:
                import openai

                # Qwen's embedding API speaks the OpenAI wire protocol
                self._embedding_client = openai.OpenAI(
                    api_key=settings.QWEN_API_KEY,
                    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
                )

            response = self._embedding_client.embeddings.create(
                model=model_name,
                input=text
            )

            return response.data[0].embedding

        except Exception as e:
            logger.error(f"获取 embedding 失败: {e}")
            raise

    def cosine_similarity(self, vec1: List[float], vec2: List[float]) -> float:
        """Cosine similarity of two vectors; 0.0 if either has zero norm."""
        a = np.asarray(vec1, dtype=float)
        b = np.asarray(vec2, dtype=float)

        denom = np.linalg.norm(a) * np.linalg.norm(b)
        if denom == 0:
            return 0.0

        return float(np.dot(a, b) / denom)

    async def process_document(
        self,
        db: Session,
        document_id: int,
        kb_config: Optional[Dict[str, Any]] = None
    ):
        """Process a document end-to-end: extract, chunk, embed, store.

        Sets the document status to "processing" first, then "completed" on
        success or "failed" (with the error message) on any exception, which
        is re-raised to the caller.

        Args:
            db: active SQLAlchemy session.
            document_id: primary key of the models.Document row to process.
            kb_config: optional override for the knowledge base's own config.
        """
        try:
            document = db.query(models.Document).filter(
                models.Document.id == document_id
            ).first()

            if not document:
                raise ValueError(f"文档不存在: {document_id}")

            # Mark in-progress so concurrent readers see the state
            document.status = "processing"
            db.commit()

            # 1. Extract raw text
            logger.info(f"开始处理文档: {document.title}")
            text = self.extract_text_from_file(document.file_path)

            if not text or not text.strip():
                raise ValueError("文档内容为空")

            # 2. Resolve chunking config (explicit argument wins over KB config)
            kb = document.knowledge_base
            config = kb_config or kb.config or {}
            chunk_size = config.get('chunk_size', self.chunk_size)
            chunk_overlap = config.get('chunk_overlap', self.chunk_overlap)
            embedding_model = kb.embedding_model or "text-embedding-v3"

            # 3. Chunk the text
            logger.info(f"切分文本，chunk_size={chunk_size}, overlap={chunk_overlap}")
            chunks = self.split_text(text, chunk_size, chunk_overlap)
            logger.info(f"切分完成，共 {len(chunks)} 个块")

            # 4. Embed each chunk and stage it for insertion
            logger.info("开始向量化...")
            for i, chunk_text in enumerate(chunks):
                embedding = self.get_embedding(chunk_text, embedding_model)

                chunk = models.DocumentChunk(
                    doc_id=document.id,
                    kb_id=document.kb_id,
                    content=chunk_text,
                    embedding=json.dumps(embedding),  # stored as a JSON string
                    meta={"chunk_index": i, "chunk_total": len(chunks)}
                )
                db.add(chunk)

                if (i + 1) % 10 == 0:
                    logger.info(f"已处理 {i + 1}/{len(chunks)} 个块")

            # 5. Mark complete (this commit also flushes all staged chunks)
            document.status = "completed"
            document.error_msg = None
            db.commit()

            logger.info(f"文档处理完成: {document.title}")

        except Exception as e:
            logger.error(f"文档处理失败 {document_id}: {e}", exc_info=True)

            # Roll back first: after a failed flush/commit the session is
            # unusable until rollback() is called, so the status update
            # below would otherwise fail too.
            db.rollback()
            document = db.query(models.Document).filter(
                models.Document.id == document_id
            ).first()
            if document:
                document.status = "failed"
                document.error_msg = str(e)
                db.commit()

            raise

    def search_similar_chunks(
        self,
        db: Session,
        kb_id: int,
        query: str,
        top_k: int = 3,
        threshold: float = 0.5
    ) -> List[Dict[str, Any]]:
        """Return up to *top_k* chunks of knowledge base *kb_id* whose cosine
        similarity to *query* is >= *threshold*, best match first.

        Never raises: any failure is logged and degrades to an empty result,
        so retrieval errors mean "no context" rather than breaking the caller.
        """
        try:
            kb = db.query(models.KnowledgeBase).filter(
                models.KnowledgeBase.id == kb_id
            ).first()

            if not kb:
                logger.warning(f"知识库不存在: {kb_id}")
                return []

            embedding_model = kb.embedding_model or "text-embedding-v3"

            # Embed the query with the same model the KB was indexed with
            query_embedding = self.get_embedding(query, embedding_model)

            # NOTE(review): brute-force scan over every chunk; fine for small
            # KBs, consider a vector index (pgvector/FAISS) as data grows.
            chunks = db.query(models.DocumentChunk).filter(
                models.DocumentChunk.kb_id == kb_id
            ).all()

            if not chunks:
                logger.warning(f"知识库 {kb_id} 中没有文档块")
                return []

            similarities = []
            for chunk in chunks:
                if not chunk.embedding:
                    continue

                try:
                    chunk_embedding = json.loads(chunk.embedding)
                    similarity = self.cosine_similarity(query_embedding, chunk_embedding)

                    if similarity >= threshold:
                        similarities.append({
                            "chunk_id": chunk.id,
                            "content": chunk.content,
                            "similarity": similarity,
                            "meta": chunk.meta
                        })
                except Exception as e:
                    # A single corrupt embedding must not abort the search
                    logger.error(f"计算相似度失败 chunk {chunk.id}: {e}")
                    continue

            # Highest similarity first, truncated to top_k
            similarities.sort(key=lambda x: x['similarity'], reverse=True)
            results = similarities[:top_k]

            logger.info(f"检索到 {len(results)} 个相关文档块")
            return results

        except Exception as e:
            logger.error(f"检索失败: {e}", exc_info=True)
            return []


# Global singleton instance shared by the application
rag_service = RAGService()

