Update linkedin_profile_chatbot/chat.py
Browse files
linkedin_profile_chatbot/chat.py
CHANGED
|
@@ -9,11 +9,27 @@ def evaluator_user_prompt(reply, message, history):
|
|
| 9 |
user_prompt += "Please evaluate the response, replying with whether it is acceptable and your feedback."
|
| 10 |
return user_prompt
|
| 11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
def evaluate(reply, message, history) -> Evaluation:
    """Ask the evaluator model whether *reply* is an acceptable answer.

    Assembles a two-message conversation — the evaluator system prompt
    followed by the user-side evaluation request built from the reply,
    the original message, and the chat history — then parses the model
    output directly into an ``Evaluation`` via the structured-output
    (``beta.chat.completions.parse``) endpoint.
    """
    conversation = [
        {"role": "system", "content": evaluator_system_prompt},
        {"role": "user", "content": evaluator_user_prompt(reply, message, history)},
    ]
    response = gemini.beta.chat.completions.parse(
        model="gemini-2.0-flash",
        messages=conversation,
        response_format=Evaluation,
    )
    return response.choices[0].message.parsed
|
| 17 |
|
| 18 |
def chat(message, history):
|
| 19 |
messages = [{"role":"system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
|
|
|
|
| 9 |
user_prompt += "Please evaluate the response, replying with whether it is acceptable and your feedback."
|
| 10 |
return user_prompt
|
| 11 |
|
| 12 |
+
#def evaluate(reply, message, history) -> Evaluation:
|
| 13 |
+
|
| 14 |
+
# messages = [{"role": "system", "content": evaluator_system_prompt}] + [{"role": "user", "content": evaluator_user_prompt(reply, message, history)}]
|
| 15 |
+
# response = gemini.beta.chat.completions.parse(model="gemini-2.0-flash", messages=messages, response_format=Evaluation)
|
| 16 |
+
# return response.choices[0].message.parsed
|
| 17 |
+
|
| 18 |
def evaluate(reply, message, history) -> Evaluation:
    """Evaluate an assistant reply and return a structured ``Evaluation``.

    Sends the evaluator system prompt plus a user prompt built from the
    reply, the original message, and the chat history, constrains the
    model output to the ``Evaluation`` JSON schema, and validates the raw
    JSON content into an ``Evaluation`` instance.
    """
    messages = [
        {"role": "system", "content": evaluator_system_prompt},
        {"role": "user", "content": evaluator_user_prompt(reply, message, history)},
    ]

    # NOTE(review): the client is named `gemini` but the model is
    # "gpt-4o-mini" — confirm which backend/base_url this is meant to hit.
    response = gemini.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        # Fix: the Chat Completions API requires the schema nested under a
        # "json_schema" key (with a mandatory "name"); a top-level "schema"
        # key is rejected with an invalid_request_error.
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "evaluation",
                "schema": Evaluation.model_json_schema(),
            },
        },
    )

    parsed = Evaluation.model_validate_json(response.choices[0].message.content)
    return parsed
|
| 32 |
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
def chat(message, history):
|
| 35 |
messages = [{"role":"system", "content": system_prompt}] + history + [{"role": "user", "content": message}]
|