import os
import logging

from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def generate_response(user_prompt, model_name, system_prompt):
    try:
        # Get API key from environment variable
        api_key = os.getenv("OPENROUTER_API_KEY")
        if not api_key:
            return "Error: API key not found in environment variables"

        # Initialize OpenRouter client using OpenAI with custom base URL
        client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=api_key,
            default_headers={
                "HTTP-Referer": "null",
                "X-Title": "LLMInterface",
            },
        )

        # Map display names to actual model names
        model_mapping = {
            "Claude Haiku": "anthropic/claude-3.5-haiku-20241022:beta",
            "DeepSeek": "deepseek/deepseek-r1:free",
            "Claude Premium": "anthropic/claude-3.5-sonnet",
            "GPT Pro": "openai/gpt-4-0125-preview",
        }

        # Get the actual model name
        actual_model = model_mapping.get(model_name)
        if not actual_model:
            return "Error: Invalid model selection"

        # Make the API call
        response = client.chat.completions.create(
            model=actual_model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
        )

        return response.choices[0].message.content

    except Exception as e:
        logger.error(f"Error in generate_response: {str(e)}")
        return f"Error occurred: {str(e)}"