#!/anaconda3/envs/FEALPy/bin/python3.8
# -*- coding: utf-8 -*-
# File: sentenc_embedding.py
# Author: Bryan SHEN
# E-mail: m18801919240_3@163.com
# Site: Shanghai, China
# Time: 2024/4/22 13:03
# File-Desp:


from transformers import AutoModel, AutoTokenizer
import torch


class Embedding(object):
    """Produce L2-normalized sentence embeddings with a HuggingFace encoder."""

    def __init__(self, model_path):
        """Load tokenizer and model from *model_path*; place the model on GPU
        when CUDA is available, otherwise on CPU."""
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.model = AutoModel.from_pretrained(model_path)

        # Prefer GPU when one is present.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.model.to(self.device)

    def get_embedding(self, sentences):
        """Return unit-norm first-token ([CLS]) embeddings for *sentences*
        as a numpy array of shape (len(sentences), hidden_size)."""
        # Tokenize with padding/truncation capped at 512 tokens, as torch tensors.
        encoded = self.tokenizer(
            sentences,
            padding=True,
            truncation=True,
            max_length=512,
            return_tensors="pt",
        )

        # Move every input tensor onto the same device as the model.
        batch = {name: tensor.to(self.device) for name, tensor in encoded.items()}

        # Forward pass; no autograd graph is needed for inference.
        with torch.no_grad():
            model_out = self.model(**batch, return_dict=True)

        # Take the first-token (usually [CLS]) representation of each sentence.
        cls_vectors = model_out.last_hidden_state[:, 0]

        # L2-normalize each row so embeddings lie on the unit sphere.
        cls_vectors = cls_vectors / cls_vectors.norm(dim=1, keepdim=True)

        # Hand results back on CPU as a plain numpy array.
        return cls_vectors.cpu().numpy()


if __name__ == '__main__':

    # Usage example:
    model_path = '/data_hdd/bryan/social-hotspot-collect-and-cluster/models/bce-embedding-base_v1'
    embedding = Embedding(model_path)
    sentences = ["This is a sentence.", "Here's another one.", "And a third."]
    # Fix: the class defines get_embedding(), not get_emb(); the original
    # call raised AttributeError at runtime.
    embeddings = embedding.get_embedding(sentences)
    print(embeddings)

