import os
# Route Hugging Face Hub downloads through a mirror. NOTE: this must be set
# before `transformers` is imported below, or the endpoint is not picked up.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

from transformers import AutoModel, AutoTokenizer
import torch

class EmbeddingModel:
    """Wraps a Hugging Face encoder to produce L2-normalized CLS-token embeddings."""

    def __init__(self, model_name: str, device: str = 'cuda'):
        """
        Load the tokenizer and model, then move the model to the target device.

        Args:
            model_name: Hugging Face model id or local checkpoint path.
            device: Preferred device; silently falls back to 'cpu' when CUDA
                is unavailable.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        self.model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
        # Fall back to CPU so the class still works on machines without a GPU.
        self.device = device if torch.cuda.is_available() else 'cpu'
        self.model.to(self.device)
        # Inference-only usage: switch off dropout etc. so repeated calls on
        # the same input yield identical embeddings.
        self.model.eval()

    def get_embeddings(self, sentences: list[str]) -> torch.Tensor:
        """
        Encode a list of sentences into L2-normalized embedding vectors.

        Args:
            sentences: Input strings; tokenized with padding and truncated
                to 512 tokens.

        Returns:
            Tensor of shape (len(sentences), hidden_size); each row has unit norm.
        """
        inputs = self.tokenizer(sentences, padding=True, truncation=True, max_length=512, return_tensors="pt")
        inputs_on_device = {k: v.to(self.device) for k, v in inputs.items()}
        # no_grad: we never backprop here, so skip building the autograd
        # graph — otherwise every call retains activations and leaks memory.
        with torch.no_grad():
            outputs = self.model(**inputs_on_device, return_dict=True)
        embeddings = outputs.last_hidden_state[:, 0]  # CLS token
        normalized_embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)
        return normalized_embeddings

if __name__ == '__main__':
    # Demo: embed two sample sentences with a local checkpoint and print them.
    checkpoint = '/home/jina-embeddings-v3'
    texts = ['sentence_0', 'sentence_1']
    encoder = EmbeddingModel(checkpoint, device='cuda')
    result = encoder.get_embeddings(texts)
    print(result)
