# app/embedding.py
from langchain_core.embeddings import Embeddings
from langchain_ollama import OllamaEmbeddings
from langchain_community.embeddings import HuggingFaceEmbeddings
from langgraph_memory.src.config.config import Settings
import asyncio

class EmbeddingModel:
    """Embedding backend selected by prefix from ``Settings.EMBEDDING_MODEL``.

    Supported model-name formats:
      * ``"openai:<model>"``        -> served via ``OllamaEmbeddings`` at
        ``Settings.EMBEDDING_URL``.
        NOTE(review): despite the ``openai:`` prefix this branch builds an
        *Ollama* client — presumably the URL points at an OpenAI-compatible
        endpoint hosted by Ollama; confirm against deployment config.
      * ``"huggingface:<path>"``    -> ``HuggingFaceEmbeddings``.
      * anything else               -> treated as a HuggingFace model name.
    """

    def __init__(self):
        model_name = Settings.EMBEDDING_MODEL

        # Choose the embedding class based on the model-name prefix.
        if model_name.startswith("openai:"):
            # e.g. "openai:text-embedding-3-small"
            actual_model = model_name.split(":", 1)[1]
            self.embedder: Embeddings = OllamaEmbeddings(
                model=actual_model,
                base_url=Settings.EMBEDDING_URL
            )
        elif model_name.startswith("huggingface:"):
            # e.g. "huggingface:sentence-transformers/all-MiniLM-L6-v2"
            model_path = model_name.split(":", 1)[1]
            self.embedder = HuggingFaceEmbeddings(
                model_name=model_path
            )
        else:
            # Default: treat the bare name as a HuggingFace model.
            self.embedder = HuggingFaceEmbeddings(
                model_name=model_name
            )

        # Embedding dimensionality; resolved lazily on first access to `dim`
        # so construction never triggers a model call.
        self._dim = None

    @property
    def dim(self) -> int:
        """Dimensionality of the embedding vectors (probed once, then cached)."""
        if self._dim is None:
            # Embed a throwaway sentence once and cache the vector length.
            test_vec = self.embed("test")
            self._dim = len(test_vec)
        return self._dim

    def embed(self, text: str) -> list[float]:
        """Synchronously embed a single text (legacy-compatible entry point)."""
        return self.embedder.embed_query(text)

    async def aembed(self, text: str) -> list[float]:
        """Asynchronously embed a single text.

        Prefers the backend's native async API. If the backend has none,
        the blocking sync call is off-loaded to a worker thread so the
        event loop is not stalled during model inference.
        """
        if hasattr(self.embedder, "aembed_query"):
            return await self.embedder.aembed_query(text)
        # Fallback: run the blocking sync call in a thread instead of
        # invoking it directly inside the coroutine (which would block
        # the entire event loop for the duration of the embedding call).
        return await asyncio.to_thread(self.embed, text)

    def embed_batch(self, texts: list[str]) -> list[list[float]]:
        """Embed many texts in one backend call (batching for performance)."""
        return self.embedder.embed_documents(texts)