from transformers import AutoModel, AutoTokenizer
from .clean_text import clean_text
import torch
import faiss
import numpy as np
import os
import sys
import logging

logger = logging.getLogger(__name__)

# Absolute path to the locally-stored jina v3 embedding model, four directories
# up from this file under ai_models/. normpath collapses the ".." segments so
# the logged path is readable; from_pretrained accepts either form.
MODEL_ID = os.path.normpath(
    os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "..", "..", "..", "..", "ai_models", "model-jinaai-v3",
    )
)
# Log *before* loading (loading can be slow / can fail); lazy %-args avoid
# eager string formatting.
logger.info("loading model from: %s", MODEL_ID)
# trust_remote_code=True is required because the jina model ships custom code.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=True)
# Embedding dimensionality, e.g. for sizing a faiss index downstream.
hidden_size = model.config.hidden_size


def get_embedding(text):
    """Embed *text* with the module-level model and return a numpy array.

    Args:
        text: a string (or list of strings) accepted by the tokenizer.

    Returns:
        numpy array of shape (batch, hidden_size) holding the [CLS]-token
        vector for each input sequence.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Pure inference: disable autograd so no graph is built (saves memory
    # and compute; the original relied on .detach() after the fact).
    with torch.no_grad():
        outputs = model(**inputs)
    # Use the [CLS] token's vector as the representation of the whole text.
    return outputs.last_hidden_state[:, 0, :].numpy()




