import os
from datetime import datetime

import gradio as gr
from openai import OpenAI
################# Start PERSONA-SPECIFIC VALUES ######################
coach_code = os.getenv("COACH_CODE")
coach_name_short = os.getenv("COACH_NAME_SHORT")
coach_name_upper = os.getenv("COACH_NAME_UPPER")
sys_prompt_new = os.getenv("PROMPT_NEW")
theme = ""
################# End PERSONA-SPECIFIC VALUES ######################
################# Start OpenAI-SPECIFIC VALUES ######################
# Initialize OpenAI API client with API key
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# OpenAI model
openai_model = "gpt-3.5-turbo-0125"
################# End OpenAI-SPECIFIC VALUES ######################
tx = os.getenv("TX")  # secret keyword that returns the saved transcript instead of a chat reply
prefix = ""  # "data/" if local or "/data/" if persistent in HF
############### CHAT ###################
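# predict() handles one chat turn: it sends the running conversation to the
# OpenAI chat API, appends the exchange to the per-coach transcript file, and
# returns the reply. Typing the secret TX keyword returns the saved transcript.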
def predict(user_input, history):
    max_length = 500
    transcript_file_path = f"{prefix}{coach_code}-transcript.txt"
    transcript = ""  # Initialize the transcript variable

    # Secret keyword: return the stored transcript instead of chatting
    if user_input == tx:
        if os.path.exists(transcript_file_path):
            with open(transcript_file_path, "r", encoding="UTF-8") as file:
                transcript = file.read()
            return transcript
        return f"File '{transcript_file_path}' not found."
    elif len(user_input) > max_length:
        raise gr.Error(f"Input is TOO LONG. Max length is {max_length} characters. Try again.")
    # Rebuild the conversation in the OpenAI message format
    history_openai_format = [
        {"role": "system", "content": "IDENTITY: " + sys_prompt_new}
    ]
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": user_input})

    completion = client.chat.completions.create(
        model=openai_model,
        messages=history_openai_format,
        temperature=1.2,
        frequency_penalty=0.4,
        presence_penalty=0.1,
        stream=True
    )
    # Accumulate the streamed chunks into the full reply
    message_content = ""
    for chunk in completion:
        if chunk.choices[0].delta.content is not None:
            message_content += chunk.choices[0].delta.content

    # Append the latest user and assistant messages to the transcript
    transcript += "Date/Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n\n"
    transcript += f"YOU: {user_input}\n\n"
    transcript += f"{coach_name_upper}: {message_content}\n\n\n"

    # Write the updated transcript to the file
    with open(transcript_file_path, "a", encoding="UTF-8") as file:
        file.write(transcript)

    return message_content
# GUI
with gr.Blocks(theme=theme) as demo:
    gr.ChatInterface(
        predict,
        submit_btn="Chat with " + coach_name_short,
        retry_btn=None,
        undo_btn=None,
        clear_btn=None,
        autofocus=True,
    )

demo.launch(show_api=False)  # show_api=False hides the "Use via API" footer link