import torch
from sentence_transformers import SentenceTransformer, CrossEncoder

# Embedding model checkpoints (BGE bi-encoders)
EMBEDDING_MODEL_M3 = "BAAI/bge-m3"
EMBEDDING_MODEL_LARGE = "BAAI/bge-large-en-v1.5"

# Use the GPU when one is available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load both bi-encoder embedding models onto the selected device
embedding_model_m3 = SentenceTransformer(EMBEDDING_MODEL_M3, device=device)
embedding_model_large = SentenceTransformer(EMBEDDING_MODEL_LARGE, device=device)

# Record each model's output embedding dimensionality
embedding_dim_m3 = embedding_model_m3.get_sentence_embedding_dimension()
embedding_dim_large = embedding_model_large.get_sentence_embedding_dimension()

# Cross-encoder reranker that scores (query, passage) pairs jointly
reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
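
# --- Usage sketch (illustrative, not part of the original source) ---
# A minimal example of how these objects are typically combined: a bi-encoder
# embeds the query and candidate passages for fast similarity scoring, and the
# cross-encoder reranks the candidates by scoring each (query, passage) pair
# jointly. The query and passages below are hypothetical placeholders.
query = "What is dense retrieval?"
passages = [
    "Dense retrieval encodes queries and documents into vectors.",
    "BM25 is a classic sparse retrieval baseline.",
]

# Embed with one of the bi-encoders; normalized embeddings make the
# dot product equal to cosine similarity.
query_emb = embedding_model_m3.encode(query, normalize_embeddings=True)
passage_embs = embedding_model_m3.encode(passages, normalize_embeddings=True)
bi_encoder_scores = passage_embs @ query_emb

# Rerank the same candidates with the cross-encoder, which is slower
# but usually more accurate than bi-encoder similarity alone.
rerank_scores = reranker.predict([(query, p) for p in passages])
print(bi_encoder_scores, rerank_scores)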