"""Model-loading helpers: sentence-embedding model and the Yi-Coder chat model."""
# models.py
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer

from config import EMBEDDING_MODEL_NAME
# Load the sentence-embedding model.
def load_embedding_model():
    """Load the SentenceTransformer embedding model.

    Returns:
        SentenceTransformer: model named by ``EMBEDDING_MODEL_NAME``,
        placed on CUDA when available, otherwise CPU.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    embedding_model = SentenceTransformer(EMBEDDING_MODEL_NAME, device=device)
    return embedding_model
# Load the Yi-Coder chat model and its tokenizer.
def load_yi_coder_model():
    """Load the Yi-Coder-9B-Chat model and tokenizer.

    Returns:
        tuple: ``(tokenizer, model, device)`` — the model is loaded in
        float16, moved to CUDA when available, and set to eval mode.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_path = "01-ai/Yi-Coder-9B-Chat"  # Hub model id (or a local path) — must be available
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    yi_coder_model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,  # reduces peak RAM while loading the checkpoint
    ).to(device).eval()
    return tokenizer, yi_coder_model, device