|
import openai |
|
import google.generativeai as genai |
|
|
|
from config import ( |
|
OPENAI_API_KEY, |
|
GEMINI_API_KEY, |
|
OPENAI_DEFAULT_MODEL, |
|
GEMINI_DEFAULT_MODEL |
|
) |
|
|
|
|
|
def configure_llms():
    """
    Initialize credentials for every LLM provider that has a key set.

    Gemini is configured through ``genai.configure``; OpenAI by assigning
    the module-level ``openai.api_key``. A provider whose key is empty or
    missing is silently skipped, so the rest of the module can still use
    the other one.
    """
    if GEMINI_API_KEY:
        genai.configure(api_key=GEMINI_API_KEY)
    if OPENAI_API_KEY:
        openai.api_key = OPENAI_API_KEY
|
|
|
|
|
def openai_chat(system_prompt, user_prompt, model=None, temperature=0.3):
    """
    Send a system + user message pair to the OpenAI chat-completion API.

    Args:
        system_prompt: Text placed in the "system" role message.
        user_prompt: Text placed in the "user" role message.
        model: Model name; defaults to ``OPENAI_DEFAULT_MODEL`` when None.
        temperature: Sampling temperature forwarded to the API.

    Returns:
        The assistant reply text (whitespace-stripped), or an
        ``"Error ..."`` string when the key is missing or the call fails.
    """
    if not OPENAI_API_KEY:
        return "Error: OpenAI API key not provided."

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    try:
        # Legacy (pre-1.0) OpenAI SDK interface, matching the
        # openai.api_key setup performed in configure_llms().
        completion = openai.ChatCompletion.create(
            model=model or OPENAI_DEFAULT_MODEL,
            messages=messages,
            temperature=temperature,
        )
        return completion.choices[0].message["content"].strip()
    except Exception as exc:
        # Errors are reported as strings rather than raised, so callers
        # can surface them directly in a UI.
        return f"Error calling OpenAI: {str(exc)}"
|
|
|
|
|
def gemini_chat(system_prompt, user_prompt, model_name=None, temperature=0.3):
    """
    Call Google Gemini via google.generativeai.

    Args:
        system_prompt: System-level instructions for the model.
        user_prompt: The user's message.
        model_name: Model name; defaults to ``GEMINI_DEFAULT_MODEL`` when None.
        temperature: Sampling temperature for the response.

    Returns:
        The model's reply text, or an ``"Error ..."`` string when the key
        is missing or the call fails.
    """
    if not GEMINI_API_KEY:
        return "Error: Gemini API key not provided."
    final_model = model_name or GEMINI_DEFAULT_MODEL
    try:
        # BUG FIX: chat history entries only accept "user"/"model" roles
        # (as Content-like dicts, not tuples), so passing
        # history=[("system", system_prompt)] always raised and every call
        # returned the error string. System prompts belong in
        # system_instruction (requires a reasonably recent
        # google-generativeai release — confirm against pinned version).
        model = genai.GenerativeModel(
            model_name=final_model,
            system_instruction=system_prompt,
        )
        # BUG FIX: send_message() has no `temperature` kwarg; sampling
        # parameters must be supplied via generation_config.
        reply = model.start_chat().send_message(
            user_prompt,
            generation_config={"temperature": temperature},
        )
        return reply.text
    except Exception as e:
        return f"Error calling Gemini: {str(e)}"
|
|