from transformers import AutoTokenizer, AutoModel
import torch
from langchain.embeddings.base import Embeddings

class BGEM3Embeddings(Embeddings):
    """LangChain ``Embeddings`` adapter around a HuggingFace encoder.

    Embeds text by running the model and mean-pooling the final hidden
    states (weighted by the attention mask, so padding/ignored positions
    do not dilute the average).
    """

    def __init__(self, model, tokenizer):
        """Store the HF model and its matching tokenizer.

        Args:
            model: a HuggingFace ``AutoModel`` (or compatible) instance.
                NOTE(review): caller is expected to have put it in
                ``.eval()`` mode; we do not mutate it here.
            tokenizer: the tokenizer paired with ``model``.
        """
        self.model = model
        self.tokenizer = tokenizer

    def embed_text(self, text: str) -> list:
        """Return a single embedding vector (list of floats) for ``text``.

        Fix vs. previous version: ``truncation=True`` so inputs longer
        than the model's max sequence length no longer crash the forward
        pass, and pooling is attention-mask-weighted so padded/masked
        positions are excluded from the mean.
        """
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        hidden = outputs.last_hidden_state            # (1, seq, dim)
        mask = inputs["attention_mask"].unsqueeze(-1).to(hidden.dtype)
        # Mask-weighted mean: sum of real-token states / count of real tokens.
        # clamp guards against division by zero on a degenerate all-masked input.
        summed = (hidden * mask).sum(dim=1)
        counts = mask.sum(dim=1).clamp(min=1e-9)
        return (summed / counts).squeeze(0).tolist()

    def embed_documents(self, texts: list) -> list:
        """Embed each document independently; returns a list of vectors."""
        return [self.embed_text(text) for text in texts]

    def embed_query(self, text: str) -> list:
        """Queries use the same embedding path as documents."""
        return self.embed_text(text)