simpx committed
Commit ee5bd25
1 Parent(s): 0358438
Files changed (1)
  1. app.py +18 -12
app.py CHANGED
@@ -1,14 +1,23 @@
 import gradio as gr
 import openai
 
+# The first line contains the OpenAI API key; the second line provides the OpenAI base URL, which is useful when the OpenAI server sits behind a proxy.
+# e.g. first line "sk-xxxxxxxxxx", second line "http://PROXY-URL"
 config = open("config").readlines()
 openai.api_key = config[0].strip()
-openai.api_base = config[1].strip()
+if len(config) > 1 and len(config[1].strip()) > 0:
+    openai.api_base = config[1].strip()
+
+# config
+system_message = "You are an assistant who gives brief and concise answers."
+server_name = "0.0.0.0"
+server_port = 8000
+DEBUG = False
 
 '''
-gradio: [['first message', 'No'], ['trying a second time', 'Yes']]
-openai: [{"role": "user", "content": "first message"},
-         {"role": "assistant", "content": "Who won the world series in 2020?"}]
+gradio: [['first question', 'No'], ['second question', 'Yes']]
+openai: [{"role": "user", "content": "first question"}, {"role": "assistant", "content": "No"},
+         {"role": "user", "content": "second question"}, {"role": "assistant", "content": "Yes"}]
 '''
 def gradio_messages_to_openai_messages(g):
     result = []
@@ -18,23 +27,20 @@ def gradio_messages_to_openai_messages(g):
     return result
 
 def respond(chat_history, message):
-    print("----------------")
-    print("chat_history:", chat_history)
-    print("message:", message)
     messages = [
-        {"role": "system", "content": "Subsequent answers must be brief and concise"},
+        {"role": "system", "content": system_message},
         *gradio_messages_to_openai_messages(chat_history),
         {"role": "user", "content": message}
     ]
-    print("messages:", messages)
     completion = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
         messages=messages
     )
-    print("completion:", completion)
+    if DEBUG:
+        print("messages:", messages)
+        print("completion:", completion)
     response = completion['choices'][0]['message']['content']
     result = chat_history + [[message, response]]
-    print("result:", result)
     return result
 
 with gr.Blocks() as demo:
@@ -57,4 +63,4 @@ with gr.Blocks() as demo:
         chatbot,
     )
 
-demo.launch(server_name="0.0.0.0", server_port=8000)
+demo.launch(server_name=server_name, server_port=server_port)
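The body of gradio_messages_to_openai_messages falls outside the changed hunks, so it is not shown above. Based on the docstring in this diff, a minimal sketch of the conversion could look like the following; the loop and the None guard are assumptions, not the committed code:

def gradio_messages_to_openai_messages(g):
    # g is the gradio chat history: a list of [user_text, assistant_text] pairs.
    result = []
    for user_text, assistant_text in g:
        result.append({"role": "user", "content": user_text})
        if assistant_text is not None:  # guard against a turn that has no reply yet
            result.append({"role": "assistant", "content": assistant_text})
    return result

With the docstring's example, [['first question', 'No'], ['second question', 'Yes']] would expand into four alternating user/assistant messages.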
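The gr.Blocks section is also truncated in this diff; only the trailing chatbot output argument and the launch call are visible. For context, a minimal wiring that matches respond(chat_history, message) might look like the sketch below. The component names msg and chatbot and the use of Textbox.submit are assumptions, not the committed layout, and respond is the function defined in app.py above:

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # respond(chat_history, message) returns the updated history,
    # so the chatbot component is both an input and the output.
    msg.submit(
        respond,
        [chatbot, msg],
        chatbot,
    )

demo.launch(server_name=server_name, server_port=server_port)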