Spaces: PhilSpiel / Running

PhilSpiel committed
Commit ef731f5
1 parent: 421fb28

Create app.py

Files changed (1):
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
+ import gradio as gr
+ import os
+ from openai import OpenAI
+ import os.path
+
+ ################# Start PERSONA-SPECIFIC VALUES ######################
+ coach_code = "gp"
+ coach_name_short = "General Patton"
+ coach_name_upper = "GENERAL PATTON"
+ coach_name_long = "General George S. Patton"
+ sys_prompt_new = os.getenv("PROMPT_NEW")
+ ################# End PERSONA-SPECIFIC VALUES ######################
+
+ ################# Start OpenAI-SPECIFIC VALUES ######################
+ # Initialize OpenAI API client with API key
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ # OpenAI model
+ openai_model = "gpt-3.5-turbo-0125"
+ ################# End OpenAI-SPECIFIC VALUES ######################
+
+ ################# Log in ######################
+ security = os.getenv("AUTH")
+
+ ############### CHAT ###################
+ def predict(user_input, history):
+     max_length = 500
+     if len(user_input) > max_length:
+         raise gr.Error(f"Input is TOO LONG. Max length is {max_length} characters. Try again.")
+
+     history_openai_format = [
+         {"role": "system", "content": "IDENTITY: " + sys_prompt_new}
+     ]
+     for human, assistant in history:  # replay prior turns in the OpenAI messages format
+         history_openai_format.append({"role": "user", "content": human})
+         history_openai_format.append({"role": "assistant", "content": assistant})
+     history_openai_format.append({"role": "user", "content": user_input})
+
+     completion = client.chat.completions.create(
+         model=openai_model,
+         messages=history_openai_format,
+         temperature=1.2,
+         frequency_penalty=0.4,
+         presence_penalty=0.1,
+         stream=True
+     )
+
+     output_stream = ""
+     for chunk in completion:
+         if chunk.choices[0].delta.content is not None:
+             output_stream = output_stream + chunk.choices[0].delta.content
+             yield output_stream  # stream the growing reply to the chat UI
+     message_content = output_stream
+
+     return message_content
+
+ def same_auth(username, password):
+     username = username + security  # password must be the username with the AUTH secret appended
+     return username == password
+
+ # GUI
+ theme = gr.themes.Default()
+
+ with gr.Blocks(theme) as demo:
+
+     gr.ChatInterface(predict, submit_btn="Chat with " + coach_name_short, retry_btn=None, undo_btn=None, clear_btn=None, autofocus=True)
+
+ demo.launch(show_api=False, auth=same_auth)
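
A note on the login check above: same_auth accepts a login only when the password equals the username with the AUTH secret appended, so every authorized user shares the same secret suffix. A minimal sketch of that behaviour, using a placeholder secret (the real value stays in the Space's environment settings):

import os

os.environ["AUTH"] = "-example-secret"  # placeholder; the Space reads its real secret from its settings
security = os.getenv("AUTH")

def same_auth(username, password):
    # Same rule as app.py: password must be the username plus the AUTH secret
    return username + security == password

print(same_auth("phil", "phil-example-secret"))  # True  -> login accepted
print(same_auth("phil", "phil"))                 # False -> login rejected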