import openai
import requests
from environs import Env
from typing import Any, Dict


def download_env_file(url: str, local_path: str):
    """Download a remote file (here, the .env holding the OpenAI credentials) to a local path."""
    response = requests.get(url)
    response.raise_for_status()  # Ensure we notice bad responses
    with open(local_path, 'wb') as f:
        f.write(response.content)


# Download the .env file
env_file_url = "https://drive.google.com/uc?export=download&id=1bIkq-X1S9943w8-rp8NErTP9G4YfXqKa" # Adjusted URL for direct download
local_env_path = "openai.env"
download_env_file(env_file_url, local_env_path)
# Load environment variables
env = Env()
env.read_env("openai.env")
openai.api_key = env.str("OPENAI_API_KEY")
# Constants
MODEL = env.str("MODEL", "gpt-3.5-turbo")
AI_RESPONSE_TIMEOUT = env.int("AI_RESPONSE_TIMEOUT", 20)


class EndpointHandler:
    """Hugging Face Inference Endpoints handler that turns chat payloads into
    AI-generated conversation starters or reply suggestions."""

    def __init__(self, model_dir=None):
        self.model_dir = model_dir

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        try:
            if "inputs" in data:  # Check if data is in Hugging Face JSON format
                return self.process_hf_input(data)
            else:
                return self.process_json_input(data)
        except ValueError as e:
            return {"error": str(e)}
        except Exception as e:
            return {"error": str(e)}

    def process_json_input(self, json_data):
        if "FromUserKavasQuestions" in json_data and "Chatmood" in json_data:
            prompt = self.create_conversation_starter_prompt(
                json_data["FromUserKavasQuestions"],
                json_data["Chatmood"]
            )
            starter_suggestion = self.generate_conversation_starters(prompt)
            return {"conversation_starter": starter_suggestion}
        elif "LastChatMessages" in json_data:
            last_chat_messages = json_data["LastChatMessages"][-4:]
            response = {
                "version": "1.0.0-alpha",
                "suggested_responses": self.get_conversation_suggestions(last_chat_messages)
            }
            return response
        else:
            raise ValueError("Invalid JSON structure.")
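
    # Two payload shapes are accepted. The key names below are taken from the code above;
    # the sample values are illustrative assumptions only:
    #   {"FromUserKavasQuestions": [{"Question": "...", "Answer": "..."}], "Chatmood": "Casual Vibes"}
    #       -> {"conversation_starter": "..."}
    #   {"LastChatMessages": [...]}   # only the last 4 entries are used
    #       -> {"version": "1.0.0-alpha", "suggested_responses": [...]}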

    def process_hf_input(self, hf_data):
        print("Received HF Data:", hf_data)  # Debugging line
        if "inputs" in hf_data:
            actual_data = hf_data["inputs"]
            print("Processing actual data:", actual_data)  # Debugging line
            return self.process_json_input(actual_data)
        else:
            return {"error": "Invalid Hugging Face JSON structure."}

    def create_conversation_starter_prompt(self, user_questions, chatmood):
        formatted_info = " ".join(
            f"{qa['Question']} - {qa['Answer']}" for qa in user_questions if qa['Answer']
        )
        prompt = (f"Based on user profile info and a {chatmood} mood, "
                  f"generate 3 subtle and very short conversation starters. "
                  f"Explore various topics like travel, hobbies, movies, and not just culinary tastes. "
                  f"\nProfile Info: {formatted_info}")
        return prompt

    def generate_conversation_starters(self, prompt):
        try:
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=[{"role": "system", "content": prompt}],
                temperature=0.7,
                max_tokens=100,
                n=1,
                request_timeout=AI_RESPONSE_TIMEOUT
            )
            return response.choices[0].message["content"]
        except openai.error.OpenAIError as e:
            raise Exception(f"OpenAI API error: {str(e)}")
        except Exception as e:
            raise Exception(f"Unexpected error: {str(e)}")

    def transform_messages(self, last_chat_messages):
        t_messages = []
        for chat in last_chat_messages:
            if "fromUser" in chat:
                from_user = chat['fromUser']
                message = chat.get('touser', '')
                t_messages.append(f"{from_user}: {message}")
            elif "touser" in chat:
                to_user = chat['touser']
                message = chat.get('fromUser', '')
                t_messages.append(f"{to_user}: {message}")
        # Prefix the newest incoming message with "Q: " so the model treats it as the one to answer
        if t_messages and "touser" in last_chat_messages[-1]:
            latest_message = t_messages[-1]
            t_messages[-1] = f"Q: {latest_message}"
        return t_messages
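
    # Sketch of the transformation, assuming each chat entry stores the sender name in
    # "fromUser" and the message text in "touser" (an inference from the reads above,
    # not confirmed by the source):
    #   [{"fromUser": "Alex", "touser": "see you soon!"}]  ->  ["Q: Alex: see you soon!"]
    # (the final entry carries "touser", so its rendered line gains the "Q: " prefix).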

    def generate_system_prompt(self, last_chat_messages, fromusername, tousername, zodiansign=None, chatmood=None):
        if not last_chat_messages or ("touser" not in last_chat_messages[-1]):
            prompt = (f"Suggest a casual and friendly message for {fromusername} to start a conversation with {tousername} or continue naturally, "
                      f"as if talking to a good friend. Strictly avoid replying to messages from {fromusername} or answering their questions. "
                      f"Make sure the message reflects a {chatmood} mood.")
        else:
            prompt = (f"Suggest a warm and friendly reply for {fromusername} to respond to the last message from {tousername}, "
                      f"as if responding to a dear friend. Strictly avoid replying to messages from {fromusername} or answering their questions. "
                      f"Ensure the reply embodies a {chatmood} mood.")
        if zodiansign:
            prompt += f" Keep in mind {tousername}'s {zodiansign} zodiac sign."
        if chatmood:
            mood_instructions = {
                "Casual Vibes": " Keep the conversation relaxed and informal, using phrases like 'Hey, what's up?' or 'Just chilling, how about you?'",
                "Flirty Fun": " Add a playful and teasing tone, using phrases like 'You always know how to make me smile!' or 'Guess what? I have a secret to tell you.'",
                "Deep and Thoughtful": " Encourage reflective and introspective responses, using phrases like 'I've been thinking about...' or 'What's your take on...?'",
                "Humor Central": " Incorporate witty and humorous elements, using phrases like 'Why did the chicken cross the road?' or 'I have a hilarious story for you!'",
                "Romantic Feels": " Express affection and use sweet and romantic language, using phrases like 'You're the best part of my day' or 'I can't stop thinking about you.'",
                "Intellectual Banter": " Engage in thought-provoking discussions on topics like books and movies, using phrases like 'Have you read any good books lately?' or 'What do you think about the latest film?'",
                "Supportive Mode": " Offer empathy, support, and encouragement, using phrases like 'I'm here for you' or 'Everything will be okay, I believe in you.'",
                "Curiosity Unleashed": " Show eagerness to learn and explore interests by asking questions, using phrases like 'Tell me more about...' or 'I'm curious, how did you get into...?'",
                "Chill and Easygoing": " Maintain a relaxed and laid-back tone, using phrases like 'No worries, take your time' or 'Just go with the flow.'",
                "Adventurous Spirit": " Share travel stories and plans with enthusiasm and energy, using phrases like 'Let's plan our next adventure!' or 'Guess where I want to go next?'"
            }
            prompt += mood_instructions.get(chatmood, "")
        return prompt

    def get_conversation_suggestions(self, last_chat_messages):
        fromusername = last_chat_messages[-1].get("fromusername", "")
        tousername = last_chat_messages[-1].get("tousername", "")
        zodiansign = last_chat_messages[-1].get("zodiansign", "")
        chatmood = last_chat_messages[-1].get("Chatmood", "")
        messages = self.transform_messages(last_chat_messages)
        system_prompt = self.generate_system_prompt(last_chat_messages, fromusername, tousername, zodiansign, chatmood)
        messages_final = [{"role": "system", "content": system_prompt}]
        if messages:
            messages_final.extend([{"role": "user", "content": m} for m in messages])
        else:
            # If there are no messages, add a default message to ensure a response is generated
            default_message = f"{tousername}: Hi there!"
            messages_final.append({"role": "user", "content": default_message})
        try:
            response = openai.ChatCompletion.create(
                model=MODEL,
                messages=messages_final,
                temperature=0.7,
                max_tokens=150,
                n=3,
                request_timeout=AI_RESPONSE_TIMEOUT
            )
            formatted_replies = []
            for idx, choice in enumerate(response.choices):
                formatted_replies.append({
                    "type": "TEXT",
                    "body": choice.message['content'],
                    "title": f"AI Reply {idx + 1}",
                    "confidence": 1,
                })
            return formatted_replies
        except openai.error.Timeout:
            return [{
                "type": "TEXT",
                "body": "Request to the AI response generator has timed out. Please try again later.",
                "title": "AI Response Error",
                "confidence": 1
            }]
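

# Minimal local smoke test -- a sketch only, not part of the original handler. The payload
# keys mirror those handled above, but the sample values and user names are illustrative
# assumptions (including the reading of "fromUser" as sender name and "touser" as message
# text), and running it requires a valid OPENAI_API_KEY in the downloaded openai.env.
if __name__ == "__main__":
    handler = EndpointHandler()

    # Conversation-starter branch (Hugging Face "inputs" wrapper)
    starter_payload = {
        "inputs": {
            "FromUserKavasQuestions": [
                {"Question": "Favourite way to spend a weekend?", "Answer": "Hiking and films"}
            ],
            "Chatmood": "Casual Vibes",
        }
    }
    print(handler(starter_payload))

    # Reply-suggestion branch (raw JSON, no wrapper)
    reply_payload = {
        "LastChatMessages": [
            {
                "fromusername": "Alex",
                "tousername": "Sam",
                "Chatmood": "Casual Vibes",
                "fromUser": "Sam",
                "touser": "Hey, how was your day?",
            }
        ]
    }
    print(handler(reply_payload))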