# app/rag/embedding/ollama.py

from .base import EmbeddingProvider
from langchain_ollama import OllamaEmbeddings
from typing import List
from ...core.logger import logger


class OllamaEmbedding(EmbeddingProvider):
    """Embedding provider backed by an Ollama server (via langchain's OllamaEmbeddings).

    On embedding failure the provider logs the error and returns zero
    vectors instead of raising, so a batch ingestion pipeline keeps running.
    """

    def __init__(
        self,
        model: str = "bge-large-zh-v1.5",
        base_url: str = "http://localhost:11434",
        dim: int = 1024,
    ):
        """
        Initialize the Ollama embedding provider.

        :param model: model name served by Ollama
        :param base_url: Ollama service address
        :param dim: dimensionality of the fallback zero vectors returned on
            failure or for blank input. Must match the model's embedding
            size (1024 for bge-large-zh-v1.5).
        """
        self.model = model
        self.base_url = base_url
        self.dim = dim
        self._initialize_embedding()

    def _initialize_embedding(self) -> None:
        """(Re)create the underlying OllamaEmbeddings client from stored config."""
        self.embedding = OllamaEmbeddings(
            model=self.model,
            base_url=self.base_url,
        )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents.

        Empty or whitespace-only texts are dropped before the request, so
        the result may be shorter than the input. On any error a list of
        zero vectors (one per surviving text) is returned instead of raising.

        :param texts: documents to embed
        :return: one embedding vector per non-blank input text
        """
        # Filter blank entries first so a trivially-empty call never
        # touches (or lazily creates) the client.
        texts = [t for t in texts if t and t.strip()]
        if not texts:
            return []
        try:
            # Recreate the client if it was cleared/never built.
            if not self.embedding:
                self._initialize_embedding()
            return self.embedding.embed_documents(texts)
        except Exception as e:
            logger.error(f"批量文档嵌入失败: {str(e)}")
            # Best-effort fallback: zero vectors keep the pipeline alive,
            # at the cost of silently degraded retrieval quality.
            return [[0.0] * self.dim for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string.

        Blank input and any embedding error both yield a zero vector of
        length ``self.dim`` rather than raising.

        :param text: query text to embed
        :return: embedding vector for the query
        """
        # Short-circuit blank input without touching the client.
        if not text or not text.strip():
            return [0.0] * self.dim
        try:
            # Recreate the client if it was cleared/never built.
            if not self.embedding:
                self._initialize_embedding()
            return self.embedding.embed_query(text)
        except Exception as e:
            logger.error(f"查询嵌入失败: {str(e)}")
            # Best-effort fallback: see embed_documents.
            return [0.0] * self.dim

    def get_embedding(self):
        """Return the underlying embedding object, e.g. for a vector store."""
        if not self.embedding:
            self._initialize_embedding()
        return self.embedding
