import os
import re

from dotenv import load_dotenv
from llama_index.core.llms import ChatMessage
from llama_index.llms.nebius import NebiusLLM
from llama_index.llms.openllm import OpenLLM

load_dotenv()

# Provider selection and credentials, read from the environment.
LLM_PROVIDER = os.environ.get("LLM_PROVIDER", "openllm").lower()
LLM_API_URL = os.environ.get("LLM_API_URL")
LLM_API_KEY = os.environ.get("LLM_API_KEY")
NEBIUS_API_KEY = os.environ.get("NEBIUS_API_KEY", "")
OPENLLM_MODEL = os.environ.get("OPENLLM_MODEL")
NEBIUS_MODEL = os.environ.get("NEBIUS_MODEL")
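
# Example .env for local development; every value below is an illustrative
# placeholder, not a real endpoint, model id, or key:
#
#   LLM_PROVIDER=openllm
#   LLM_API_URL=http://localhost:3000/v1
#   LLM_API_KEY=na
#   OPENLLM_MODEL=meta-llama/Meta-Llama-3-8B-Instruct
#
#   # or, for Nebius:
#   # LLM_PROVIDER=nebius
#   # NEBIUS_API_KEY=<your Nebius key>
#   # NEBIUS_MODEL=<a Nebius-hosted model id>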

if LLM_PROVIDER == "nebius":
    llm = NebiusLLM(
        api_key=NEBIUS_API_KEY,
        model=NEBIUS_MODEL,
    )
else:
    # Default "openllm" provider: talk to an OpenLLM server through its
    # OpenAI-compatible endpoint. This assumes the llama-index-llms-openllm
    # integration is installed; swap in your own client if you use a
    # different server.
    llm = OpenLLM(
        model=OPENLLM_MODEL,
        api_base=LLM_API_URL,
        api_key=LLM_API_KEY,
    )


def call_llm_api(messages):
    """
    Calls the LLM API endpoint with the conversation messages, using the
    OpenLLM or NebiusLLM client configured above.

    Args:
        messages (list): List of dicts with 'role' and 'content' for each message.

    Returns:
        str: The assistant's reply as a string.
    """
    # Convert plain role/content dicts into LlamaIndex ChatMessage objects.
    chat_messages = [ChatMessage(role=m["role"], content=m["content"]) for m in messages]
    response = llm.chat(chat_messages)
    return response.message.content
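
# Example call (the prompt is an illustrative placeholder):
#
#   reply = call_llm_api([{"role": "user", "content": "Hello!"}])
#   print(reply)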


def is_stage_complete(llm_reply):
    """
    Heuristic to determine if the current stage is complete based on the LLM reply.

    Args:
        llm_reply (str): The assistant's reply.

    Returns:
        bool: True if the stage is considered complete, False otherwise.
    """
    # Case-insensitive substring match against phrases that signal completion.
    triggers = [
        "stage complete",
        "let's move to the next stage",
        "moving to the next stage",
        "next stage",
        "you have completed this stage",
    ]
    return any(trigger in llm_reply.lower() for trigger in triggers)
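

# A minimal sketch of how these helpers might drive a stage loop; the system
# prompt and flow below are illustrative assumptions, not part of the
# original design.
if __name__ == "__main__":
    conversation = [
        {"role": "system", "content": "You are a tutor. Say 'stage complete' when the learner finishes a stage."},
        {"role": "user", "content": "Let's begin the first stage."},
    ]
    reply = call_llm_api(conversation)
    print(reply)
    if is_stage_complete(reply):
        print("Stage complete - moving on.")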