from chromadb import Embeddings
from modelscope import AutoTokenizer, AutoModel
import torch


class BGE_Embed(Embeddings):
    """Chroma-compatible embedding function backed by the BAAI bge-small-zh model.

    Loads the tokenizer and model from the local ``./data/BAAI/bge-small-zh``
    snapshot and produces sentence embeddings by taking the first-token ([CLS])
    hidden state of the last layer, per the BGE usage convention.
    """

    def __init__(self):
        # Load bge-small-zh from the local ModelScope snapshot directory.
        self.tokenizer = AutoTokenizer.from_pretrained('./data/BAAI/bge-small-zh')
        self.model = AutoModel.from_pretrained('./data/BAAI/bge-small-zh')
        self.model.eval()  # inference only: disables dropout etc.

    def _encode(self, texts: list[str]) -> torch.Tensor:
        """Tokenize *texts* and return their [CLS] embeddings, shape (len(texts), dim)."""
        encoded_input = self.tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
        with torch.no_grad():
            model_output = self.model(**encoded_input)
            # First token of the last hidden state is the sentence embedding.
            # NOTE(review): the official BGE recipe also L2-normalizes here
            # (torch.nn.functional.normalize, dim=-1); confirm whether the
            # vector store's similarity metric expects that before enabling.
            sentence_embeddings = model_output[0][:, 0]
        return sentence_embeddings

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed a batch of documents, returning one vector per input text.

        Fix: the original returned a numpy ndarray, violating the declared
        ``list[list[float]]`` return type (and the Chroma Embeddings contract);
        ``.tolist()`` converts it as ``embed_query`` already did.
        """
        return self._encode(texts).numpy().tolist()

    def embed_query(self, text: str) -> list[float]:
        """Embed a single query string and return its vector.

        Fix: removed a leftover debug ``print(text)`` from the original.
        """
        return self._encode([text]).numpy().tolist()[0]
