""" llm_utils.py | |
Utilities for working with Large Language Models | |
:author: Didier Guillevic | |
:email: didier@guillevic.net | |
:creation: 2024-12-28 | |
""" | |
import logging
import os

from mistralai import Mistral

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#
# Mistral AI client
#
api_key = os.environ["MISTRAL_API_KEY"]  # raises KeyError if the variable is not set
client = Mistral(api_key=api_key)
model_id = "mistral-large-latest"  # 128k context window
#
# Some functions
#
def generate_chat_response_streaming(
    query: str,
    context: str,
    max_new_tokens: int = 1_024,
    temperature: float = 0.0
):
    """Stream a chat response grounded in the given context.

    Args:
        query: the user's question.
        context: text passages that may be relevant to the question.
        max_new_tokens: maximum number of tokens to generate.
        temperature: sampling temperature (0.0 for deterministic output).

    Returns:
        The streaming response from the Mistral chat endpoint.
    """
    # Instruction
    instruction = (
        f"You will be given a question and a list of context passages that "
        f"might be relevant to the question. "
        f"Do not include facts not contained in the provided context. "
        f"If no relevant context is provided to answer the question, "
        f"then simply say so. Do not invent anything.\n\n"
        f"Question: {query}\n\n\n"
        f"Context:\n\n{context}"
    )
    # Messages
    messages = [{'role': 'user', 'content': instruction}]
    #logger.info(messages)
    # Return the streaming response; tokens are generated incrementally
    stream_response = client.chat.stream(
        model=model_id,
        messages=messages,
        max_tokens=max_new_tokens,
        temperature=temperature,
    )
    return stream_response
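
#
# Example usage (a minimal sketch): consume the stream and print tokens as
# they arrive. Assumes the mistralai v1 SDK chunk layout, where the delta
# text lives at chunk.data.choices[0].delta.content; the query and context
# strings below are illustrative only.
#
if __name__ == "__main__":
    stream = generate_chat_response_streaming(
        query="What is the capital of France?",
        context="France is a country in Europe. Its capital is Paris.",
    )
    for chunk in stream:
        token = chunk.data.choices[0].delta.content
        if token:
            print(token, end="", flush=True)
    print()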