# llm_config.py
import json
import os
from dotenv import load_dotenv
from openai import OpenAI
# Load environment variables
load_dotenv()
# Get the API key from the environment variable
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# Check if the API key is set
if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY environment variable is not set")
# Initialize the OpenAI client
client = OpenAI(api_key=OPENAI_API_KEY)
def generate_llm_response(prompt):
    """Send a single-turn prompt to the chat completions API and return the reply text, or None on error."""
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.01
        )
        content = response.choices[0].message.content.strip()
        # print(f"LLM Response: {content}")  # For debugging
        return content
    except Exception as e:
        print(f"Error generating LLM response: {str(e)}")
        return None
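A minimal sketch of how another module in the same Space might call this helper. It assumes llm_config.py sits alongside the caller and that a .env file (or the Space's secrets) provides OPENAI_API_KEY; the caller file name app.py and the example prompt are hypothetical.

# app.py (hypothetical caller)
# Assumes llm_config.py is importable from the same directory and that
# OPENAI_API_KEY is available via .env or environment secrets.
from llm_config import generate_llm_response

answer = generate_llm_response("Summarize the plot of Hamlet in one sentence.")
if answer is None:
    # generate_llm_response already printed the underlying error
    print("LLM call failed.")
else:
    print(answer)

Because generate_llm_response returns None on any exception, callers should check for None rather than assuming a string is always returned.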