
# Implements a LangChain Embeddings class backed by a local HuggingFace model
from langchain_core.embeddings import Embeddings
from transformers import AutoTokenizer, AutoModel
import torch
class CustomEmbeddings(Embeddings):
    """LangChain-compatible embeddings backed by a local HuggingFace
    transformer checkpoint (e.g. bge-base-zh-v1.5), using CLS pooling
    followed by L2 normalization.
    """

    def __init__(self, model_path: str = r'D:\code\other\LLMs\third\tiny-universe\content\TinyRAG\data\embd_model\bge-base-zh-v1.5'):
        """Load tokenizer and model from a local HuggingFace checkpoint.

        Args:
            model_path: Directory containing the pretrained model files.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.model = AutoModel.from_pretrained(model_path)
        self.model.eval()  # inference only: disable dropout etc.

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed each document text independently, one model call per text.

        Note: only the raw text is embedded — document metadata is not included.
        """
        total = len(texts)  # hoist loop invariant
        embeds = []
        for ind, text in enumerate(texts):
            print(f'text2embd {ind}/{total}')  # progress log
            embeds.append(self.embd_infer(text))
        return embeds

    def embed_query(self, text: str) -> list[float]:
        """Embed a single query string.

        Queries and documents use the same embedding method here, although
        some models prescribe different handling for each.
        """
        return self.embd_infer(text)

    def embd_infer(self, chunk: str) -> list[float]:
        """Tokenize one text, run the model, CLS-pool and L2-normalize.

        Args:
            chunk: The text to embed (truncated to the model's max length).

        Returns:
            The normalized embedding vector as a flat list of floats.
        """
        encoded_input = self.tokenizer(chunk, padding=True, truncation=True, return_tensors='pt')
        # Compute token embeddings without tracking gradients.
        with torch.no_grad():
            model_output = self.model(**encoded_input)
            # CLS pooling: hidden state of the first token.
            sentence_embeddings = model_output[0][:, 0]
        # L2-normalize so cosine similarity reduces to a dot product.
        sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
        return sentence_embeddings.tolist()[0]  # single-text batch -> list[float]

def test_CustomEmbeddings():
    """Smoke test: embed one query and a two-item document batch, print both."""
    ce = CustomEmbeddings(model_path=r'/mnt/d/xiancai/bigfiles/models/bge-base-zh-v1___5')

    query_embedding = ce.embed_query('你好')
    print(query_embedding)

    doc_embeddings = ce.embed_documents(['你好', 'test_chunk'])
    print(doc_embeddings)

if __name__ == '__main__':
    # Run the embedding smoke test when executed as a script.
    test_CustomEmbeddings()
