import argparse
import logging
import os

import huggingface_hub
import torch
from mteb import MTEB
from sentence_transformers import SentenceTransformer

# Log in to the Hugging Face Hub with write access; a failed login is
# reported but does not stop the script.
try:
    huggingface_hub.login(
        token=os.environ["HUGGINGFACE_TOKEN"], write_permission=True
    )
except Exception as e:
    print(f"Hugging Face Hub login failed: {e}")

def get_device():
    """Return "cuda" when a GPU is available, otherwise "cpu"."""
    return "cuda" if torch.cuda.is_available() else "cpu"

def load_model(model_id, device):
    """Load a sentence-transformers model and move it to the target device."""
    model = SentenceTransformer(model_id).to(device)
    logging.info(f"Loaded model {model_id} on device {device}")
    return model

def run_evaluation(model, output_folder):
    """Run the Spanish MTEB tasks on their test splits."""
    evaluation = MTEB(task_langs=["es"])
    evaluation.run(model, output_folder=output_folder, eval_splits=["test"])

def evaluate(model_id):
    """Evaluate model_id on the Spanish MTEB tasks and return the results folder."""
    logging.basicConfig(level=logging.INFO)
    output_folder = f"results/{model_id}"
    device = get_device()
    model = load_model(model_id, device)
    run_evaluation(model, output_folder)
    return output_folder
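
# A minimal CLI entry point, sketched here as an assumption suggested by the
# otherwise-unused argparse import; the original project may invoke evaluate()
# differently (e.g. from app.py).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Evaluate a sentence-transformers model on Spanish MTEB tasks."
    )
    parser.add_argument("model_id", help="Hugging Face model id, e.g. org/model-name")
    args = parser.parse_args()
    results_path = evaluate(args.model_id)
    print(f"Results written to {results_path}")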