import os
import pickle
import shutil
import sqlite3
import time
from datetime import datetime, timedelta
from typing import Dict, List, Literal, Optional, Tuple

from openai import OpenAI

from config import Config
from doc_loader import DocumentLoader
from load_embedding import EmbeddingService
from recall import HybridRetriever
from split_doc import DocumentSplitter
from utils.logger import logger


class DocumentVectorizer:
    """Document vectorization pipeline: load a directory, split each file
    into chunks/QA pairs, then embed every chunk."""

    def __init__(self):
        # Project services: embedding backend, text splitter, file loader.
        self.embedding_service = EmbeddingService()
        self.splitter = DocumentSplitter(split_mode=Config.DEFAULT_SPLIT_MODE)
        self.doc_loader = DocumentLoader()
        # LLM client handed to the splitter (used during split_text).
        self.llm_client = OpenAI(
            api_key=Config.LLM_API_KEY,
            base_url=Config.LLM_BASE_URL,
        )

    def vectorize_documents(self, directory_path: str) -> Tuple[List[str], List[str], List[List[float]], List[str]]:
        """Vectorize every document under *directory_path*.

        Returns:
            Tuple of (chunks, qa_pairs, embeddings, doc_types).
            Four empty lists when nothing is loaded, embedding fails,
            or any step raises.
        """
        try:
            contents = self.doc_loader.load_directory(directory_path)
            if not contents:
                logger.warning("未找到任何有效文档")
                return [], [], [], []

            # Accumulate split output across every loaded document.
            chunks: List[str] = []
            qa_pairs: List[str] = []
            doc_types: List[str] = []
            for text in contents.values():
                pieces, pairs, kinds = self.splitter.split_text(text, client=self.llm_client)
                chunks += pieces
                qa_pairs += pairs
                doc_types += kinds

            # Embed all chunks in one batch; None signals an embedding failure.
            vectors = self.embedding_service.get_embeddings(chunks)
            if vectors is None:
                return [], [], [], []

            return chunks, qa_pairs, vectors, doc_types

        except Exception as e:
            logger.error(f"向量化文档失败: {str(e)}")
            return [], [], [], []

class SearchService:
    """Retrieval service: stores chunk embeddings in a HybridRetriever and
    answers queries, with an in-process cache of recent results."""

    def __init__(self):
        self.embedding_service = EmbeddingService()
        self.retriever = HybridRetriever()
        # Keyed by f"{query}_{top_k}".
        # NOTE(review): unbounded — consider an LRU bound if the query space is large.
        self.query_cache = {}
        self.cache_dir = Config.CACHE_DIR
        self.cache_file = Config.CACHE_FILE
        os.makedirs(self.cache_dir, exist_ok=True)

    def add_documents(self, chunks: List[str], qa_pairs: List[str], embeddings: List[List[float]], doc_types: Optional[List[str]] = None):
        """Add document chunks (plus QA pairs and their embeddings) to the retriever.

        Raises:
            Exception: any failure from the underlying retriever is logged and re-raised.
        """
        try:
            self.retriever.add_documents(chunks, qa_pairs, embeddings, doc_types)
            # When doc_types is absent, every chunk is treated as a paragraph.
            qa_count = doc_types.count('qa') if doc_types else 0
            para_count = doc_types.count('paragraph') if doc_types else len(chunks)
            logger.info(f"成功添加 {len(chunks)} 个文档块，类型分布: QA={qa_count}, 段落={para_count}")
        except Exception as e:
            logger.error(f"添加文档失败: {str(e)}")
            raise

    def get_documents_by_indices(self, indices: List[int]) -> List[str]:
        """Fetch chunk contents for the given chunk indices, preserving input order.

        Indices with no matching row are silently skipped. Returns [] for an
        empty request or on any database error.
        """
        try:
            if not indices:
                return []

            # One IN(...) query instead of a query per index.
            placeholders = ','.join('?' * len(indices))
            # BUG FIX: the original selected only `content` but then read
            # row[1], raising IndexError on every call (swallowed below,
            # so the method always returned []). Select chunk_index too so
            # rows can be mapped back to the requested order.
            query = f"SELECT chunk_index, content FROM documents WHERE chunk_index IN ({placeholders})"

            conn = sqlite3.connect(self.retriever.db_path)
            try:
                cursor = conn.cursor()
                cursor.execute(query, indices)
                rows = cursor.fetchall()
            finally:
                conn.close()  # release the connection even if execute raises

            result_map = {chunk_index: content for chunk_index, content in rows}
            # Preserve the caller's ordering, not the database's.
            return [result_map[idx] for idx in indices if idx in result_map]

        except Exception as e:
            logger.error(f"获取文档内容失败: {str(e)}")
            return []

    def search(self, query: str, top_k: int = 0) -> List[Dict]:
        """Search for documents relevant to *query*.

        Args:
            query: query text.
            top_k: number of results; values <= 0 fall back to Config.DEFAULT_TOP_K.

        Returns:
            List[Dict]: retriever results (each with score and chunk_index);
            [] when embedding fails or any step raises.
        """
        try:
            if top_k <= 0:
                top_k = Config.DEFAULT_TOP_K
                logger.info(f"top_k 被设为0，使用默认值: {top_k}")

            # Serve repeated queries from the cache.
            cache_key = f"{query}_{top_k}"
            if cache_key in self.query_cache:
                return self.query_cache[cache_key]

            start = time.time()
            query_embedding = self.embedding_service.get_query_embedding(query, "search")
            logger.info(f"获取查询向量耗时: {(time.time()-start)*1000:.2f}ms")
            if query_embedding is None:
                return []

            start = time.time()
            results = self.retriever.search(query, query_embedding, top_k)
            logger.info(f"检索耗时: {(time.time()-start)*1000:.2f}ms")

            self.query_cache[cache_key] = results
            return results

        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            return []

    def save_index(self, path: str):
        """Persist the retriever index to *path*; logs and re-raises on failure."""
        try:
            self.retriever.save(path)
            logger.info(f"索引已保存到: {path}")
        except Exception as e:
            logger.error(f"保存索引失败: {str(e)}")
            raise

    def load_index(self, path: str):
        """Load the retriever index from *path*; logs and re-raises on failure."""
        try:
            self.retriever.load(path)
            logger.info(f"索引已从 {path} 加载")
        except Exception as e:
            logger.error(f"加载索引失败: {str(e)}")
            raise
