Create gen_api_answer.py
gen_api_answer.py
ADDED
+99 -0
@@ -0,0 +1,99 @@
from openai import OpenAI
import anthropic
from together import Together
import json
import re

# Initialize clients
anthropic_client = anthropic.Anthropic()
openai_client = OpenAI()
together_client = Together()

SYSTEM_PROMPT = """Please act as an impartial judge and evaluate based on the user's instruction. Your output format should strictly adhere to JSON as follows: {"feedback": "<write feedback>", "result": <numerical score>}. Ensure the output is valid JSON, without additional formatting or explanations."""

def get_openai_response(model_name, prompt):
    """Get response from OpenAI API"""
    try:
        response = openai_client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": prompt}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error with OpenAI model {model_name}: {str(e)}"

def get_anthropic_response(model_name, prompt):
    """Get response from Anthropic API"""
    try:
        response = anthropic_client.messages.create(
            model=model_name,
            max_tokens=1000,
            temperature=0,
            system=SYSTEM_PROMPT,
            messages=[
                {"role": "user", "content": [{"type": "text", "text": prompt}]}
            ]
        )
        return response.content[0].text
    except Exception as e:
        return f"Error with Anthropic model {model_name}: {str(e)}"

def get_together_response(model_name, prompt):
    """Get response from Together API"""
    try:
        response = together_client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": prompt}
            ],
            stream=False
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error with Together model {model_name}: {str(e)}"

def get_model_response(model_name, model_info, prompt):
    """Get response from the appropriate API based on the model's organization"""
    if not model_info:
        return "Model not found or unsupported."

    api_model = model_info['api_model']
    organization = model_info['organization']

    try:
        if organization == 'OpenAI':
            return get_openai_response(api_model, prompt)
        elif organization == 'Anthropic':
            return get_anthropic_response(api_model, prompt)
        else:
            # All other organizations use the Together API
            return get_together_response(api_model, prompt)
    except Exception as e:
        return f"Error with {organization} model {model_name}: {str(e)}"

def parse_model_response(response):
    try:
        # Debug print
        print(f"Raw model response: {response}")

        # First try to parse the entire response as JSON
        try:
            data = json.loads(response)
            return str(data.get('result', 'N/A')), data.get('feedback', 'N/A')
        except json.JSONDecodeError:
            # If that fails (typically for smaller models), try to find JSON within the response
            json_match = re.search(r'{.*}', response)
            if json_match:
                data = json.loads(json_match.group(0))
                return str(data.get('result', 'N/A')), data.get('feedback', 'N/A')
            else:
                return 'Error', f"Failed to parse response: {response}"

    except Exception as e:
        # Debug print for the error case
        print(f"Failed to parse response: {str(e)}")
        return 'Error', f"Failed to parse response: {response}"
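
Usage note (not part of the commit): a minimal sketch of how get_model_response and parse_model_response might be chained if this script were run directly. The model registry, model IDs, and judge prompt below are assumptions for illustration only; valid API keys for the corresponding providers would still be required.

if __name__ == "__main__":
    # Hypothetical registry mapping display names to API model IDs and organizations.
    example_registry = {
        "GPT-4o": {"api_model": "gpt-4o", "organization": "OpenAI"},
        "Claude 3.5 Sonnet": {"api_model": "claude-3-5-sonnet-20241022", "organization": "Anthropic"},
    }

    # Hypothetical judging prompt passed alongside SYSTEM_PROMPT by the helper functions.
    judge_prompt = (
        "Instruction: Summarize the plot of Hamlet in one sentence.\n"
        "Response to evaluate: Hamlet is a play about a Danish prince.\n"
        "Score the response from 1 to 5."
    )

    model_name = "GPT-4o"
    # Route to the right provider, then pull the score and feedback out of the JSON reply.
    raw = get_model_response(model_name, example_registry.get(model_name), judge_prompt)
    score, feedback = parse_model_response(raw)
    print(f"{model_name} -> score={score}, feedback={feedback}")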