"""
向量嵌入
"""

from FlagEmbedding import LLMEmbedder
from core.config import settings
from core.document import Document
from typing import List
from threading import RLock
from loguru import logger


class Embeddings(object):
    """Thread-safe singleton wrapper around a FlagEmbedding ``LLMEmbedder``.

    The (expensive) embedding model is loaded exactly once, on first
    instantiation, and shared by every subsequent caller.
    """

    # Guards the lazy, one-time construction of the singleton / model.
    lock = RLock()

    def __new__(cls, *args, **kwargs):
        # Lazy singleton init: the lock serializes first-time construction
        # so the model is loaded only once even under concurrent callers.
        with Embeddings.lock:
            if not hasattr(Embeddings, "_instance"):
                Embeddings._instance = object.__new__(cls)
                cls.embedder = LLMEmbedder(settings.EMBEDDING_MODEL, use_fp16=False)
        return Embeddings._instance

    def embed_documents(self, documents: "List[Document]", task: str = 'qa', batch_size: int = 8) -> List[List[float]]:
        """Embed documents' ``content`` text in batches.

        Args:
            documents: documents whose ``content`` attribute will be embedded.
            task: FlagEmbedding task name passed through to the model.
            batch_size: number of documents encoded per model call.

        Returns:
            One embedding vector per input document, in input order.
        """
        embeddings: List[List[float]] = []
        for start in range(0, len(documents), batch_size):
            chunk_documents = documents[start: start + batch_size]
            batch_contents = [doc.content for doc in chunk_documents]

            # The model very occasionally errors on specific texts with no
            # known fix; fall back to per-item encoding so one bad text does
            # not lose the whole batch. (Was a bare `except:`, which would
            # also swallow KeyboardInterrupt/SystemExit.)
            try:
                batch_embeddings = self.embedder.encode_keys(batch_contents, max_length=5000, task=task)
                embeddings.extend(batch_embeddings.tolist())
            except Exception:
                for content in batch_contents:
                    try:
                        item_embedding = self.embedder.encode_keys(content, max_length=5000, task=task)
                    except Exception as error_info:
                        logger.error(f"embedding 如下文本报错：{content}")
                        logger.error(error_info)
                        # Embed an empty string as a placeholder so the output
                        # stays aligned one-to-one with the input documents.
                        item_embedding = self.embedder.encode_keys('', max_length=5000, task=task)
                    embeddings.append(item_embedding.tolist())

        return embeddings

    def embed_query(self, text: str, task: str = 'qa') -> List[float]:
        """Embed a single query string and return it as a plain list of floats."""
        query_embedding = self.embedder.encode_queries(text, max_length=256, task=task)
        return query_embedding.tolist()