import os
import pandas as pd
import re
import json
import numpy as np
from pathlib import Path

from transformers import AutoTokenizer, AutoModel, BertModel, BertTokenizer
from sentence_transformers import SentenceTransformer, util


# Load the tab-separated key/value config file (one "key<TAB>value" pair per
# line) into a plain dict.
dict_data = {}
with open('../LLM_APIs/config.txt', 'r', encoding='utf-8') as file:
    for current_line in file:
        # Skip blank lines — a bare newline would make split() raise ValueError.
        if not current_line.strip():
            continue
        # maxsplit=1 so values that themselves contain tabs are kept intact.
        key, value = current_line.split('\t', 1)
        dict_data[key.strip()] = value.strip()

# Local path to the embedding model checkpoint (read from the config).
EBD_MODEL = Path(dict_data['Local_encode'])

tokenizer = AutoTokenizer.from_pretrained(EBD_MODEL, trust_remote_code=True)
# BUG FIX: the original passed a {'device': 'cuda'} dict as the second
# *positional* argument of SentenceTransformer, which is `modules` — not
# `device` — so the model was never explicitly placed on the GPU. Pass the
# device keyword directly instead.
model = SentenceTransformer(str(EBD_MODEL), device='cuda')


def vectorize_texts(texts, use_tensor=False, sentence=False):
    """Encode *texts* with the module-level SentenceTransformer model.

    Args:
        texts: A string or sequence of strings to embed.
        use_tensor: When True, ask the encoder for a torch tensor instead of
            a numpy array (forwarded as ``convert_to_tensor``).
        sentence: Accepted for backward compatibility; currently unused.

    Returns:
        A ``(texts, embeddings)`` tuple — the input echoed back unchanged
        alongside its embeddings.
    """
    vectors = model.encode(texts, convert_to_tensor=use_tensor)
    return texts, vectors