# Chat/corechat.py
from pydantic import BaseModel
import openai
from environs import Env
from typing import List

# Configuration and API Key Management
env = Env()
env.read_env("openai.env")
openai.api_key = env.str("OPENAI_API_KEY")
SYSTEM_PROMPT = env.str("SYSTEM_PROMPT", "Suggest a suitable reply for a user in a dating conversation context.")
MODEL = env.str("MODEL", "gpt-3.5-turbo")
NUMBER_OF_MESSAGES_FOR_CONTEXT = min(env.int("NUMBER_OF_MESSAGES_FOR_CONTEXT", 4), 10)
AI_RESPONSE_TIMEOUT = env.int("AI_RESPONSE_TIMEOUT", 20)
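
# The openai.env file is expected to define values such as the following
# (illustrative placeholders, not real credentials):
#   OPENAI_API_KEY=sk-...
#   SYSTEM_PROMPT=Suggest a suitable reply for a user in a dating conversation context.
#   MODEL=gpt-3.5-turbo
#   NUMBER_OF_MESSAGES_FOR_CONTEXT=4
#   AI_RESPONSE_TIMEOUT=20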

class LastChatMessage(BaseModel):
    fromUser: str
    touser: str


class ConversationPayload(BaseModel):
    fromusername: str
    tousername: str
    zodiansign: str
    LastChatMessages: List[dict]
    Chatmood: str
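
# Example ConversationPayload shape (illustrative values only):
#   {"fromusername": "alex", "tousername": "sam", "zodiansign": "Leo",
#    "LastChatMessages": [{"fromUser": "sam", "touser": "Hey!"}], "Chatmood": "playful"}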

def transform_messages(last_chat_messages):
    # Flatten the chat history into "<name>: <text>" strings for the model.
    t_messages = []
    for chat in last_chat_messages:
        if "fromUser" in chat:
            from_user = chat['fromUser']
            message = chat.get('touser', '')
            t_messages.append(f"{from_user}: {message}")
        elif "touser" in chat:
            to_user = chat['touser']
            message = chat.get('fromUser', '')
            t_messages.append(f"{to_user}: {message}")
    # Mark the most recent incoming message as the question to be answered.
    if t_messages and "touser" in last_chat_messages[-1]:
        t_messages[-1] = f"Q: {t_messages[-1]}"
    return t_messages
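
# For example (illustrative values):
#   transform_messages([{"fromUser": "sam", "touser": "Hey, how are you?"}])
# returns ["Q: sam: Hey, how are you?"], since the last message contains "touser".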

def generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign=None, chatmood=None):
    # Ask for a conversation opener when there is no incoming message to answer;
    # otherwise ask for a reply to the other user's latest message.
    if not last_chat_messages or ("touser" not in last_chat_messages[-1]):
        prompt = f"Suggest a casual and friendly message for {fromusername} to start a conversation with {tousername} or continue naturally, as if talking to a good friend. Strictly avoid replying to messages from {fromusername} or answering their questions."
    else:
        prompt = f"Suggest a warm and friendly reply for {fromusername} to respond to the last message from {tousername}, as if responding to a dear friend. Strictly avoid replying to messages from {fromusername} or answering their questions."
    if zodiansign:
        prompt += f" Keep in mind {tousername}'s {zodiansign} zodiac sign."
    if chatmood:
        prompt += f" Consider the {chatmood} mood."
    return prompt
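
# For example (illustrative names), generate_system_prompt([], "alex", "sam") returns the
# opener prompt "Suggest a casual and friendly message for alex to start a conversation
# with sam ...", since there is no incoming message to answer.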

def get_conversation_suggestions(last_chat_messages):
    # Pull conversation metadata from the most recent message, if any.
    latest = last_chat_messages[-1] if last_chat_messages else {}
    fromusername = latest.get("fromusername", "")
    tousername = latest.get("tousername", "")
    zodiansign = latest.get("zodiansign", "")
    chatmood = latest.get("Chatmood", "")
    messages = transform_messages(last_chat_messages)
    system_prompt = generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign, chatmood)
    messages_final = [{"role": "system", "content": system_prompt}]
    if messages:
        messages_final.extend([{"role": "user", "content": m} for m in messages])
    else:
        # If there are no messages, add a default one so a response is still generated.
        default_message = f"{tousername}: Hi there!"
        messages_final.append({"role": "user", "content": default_message})
    try:
        # Request three candidate replies (legacy openai<1.0 ChatCompletion API).
        response = openai.ChatCompletion.create(
            model=MODEL,
            messages=messages_final,
            temperature=0.7,
            max_tokens=150,
            n=3,
            request_timeout=AI_RESPONSE_TIMEOUT
        )
        formatted_replies = []
        for idx, choice in enumerate(response.choices):
            formatted_replies.append({
                "type": "TEXT",
                "body": choice.message['content'],
                "title": f"AI Reply {idx + 1}",
                "confidence": 1,
            })
        return formatted_replies
    except openai.error.Timeout:
        # Surface a single error reply when the request exceeds AI_RESPONSE_TIMEOUT seconds.
        return [{
            "type": "TEXT",
            "body": "Request to the AI response generator has timed out. Please try again later.",
            "title": "AI Response Error",
            "confidence": 1
        }]
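

# Illustrative usage sketch (not part of the original module): the names and message
# below are made up, and running it calls the live OpenAI API, so it requires a valid
# OPENAI_API_KEY in openai.env.
if __name__ == "__main__":
    sample_history = [
        {"fromusername": "alex", "tousername": "sam", "zodiansign": "Leo",
         "Chatmood": "playful", "fromUser": "sam", "touser": "Hey, how was your weekend?"},
    ]
    for reply in get_conversation_suggestions(sample_history):
        print(reply["title"], "-", reply["body"])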