SamiKoen committed on
Commit
8c8752a
1 Parent(s): e320662

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -29
app.py CHANGED
@@ -3,15 +3,14 @@ import os
3
  import json
4
  import requests
5
 
6
- #Streaming endpoint
7
  API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
8
 
9
- #Huggingface provided GPT4 OpenAI API Key
10
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
11
 
12
- #Inference function
13
  def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
14
- prompt = {"Sen bir Trek bisiklet asistanısın"}
15
  headers = {
16
  "Content-Type": "application/json",
17
  "Authorization": f"Bearer {OPENAI_API_KEY}"
@@ -46,7 +45,7 @@ def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], hi
46
  temp["role"] = "user"
47
  temp["content"] = inputs
48
  messages.append(temp)
49
- #messages
50
  payload = {
51
  "model": "gpt-3.5-turbo",
52
  "messages": messages,
@@ -61,7 +60,7 @@ def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], hi
61
 
62
  history.append(inputs)
63
  print(f"Logging : payload is - {payload}")
64
- # make a POST request to the API endpoint using the requests.post method, passing in stream=True
65
  response = requests.post(API_URL, headers=headers, json=payload, stream=True)
66
  print(f"Logging : response code - {response}")
67
  token_counter = 0
@@ -69,14 +68,14 @@ def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], hi
69
 
70
  counter=0
71
  for chunk in response.iter_lines():
72
- #Skipping first chunk
73
  if counter == 0:
74
  counter+=1
75
  continue
76
- # check whether each line is non-empty
77
  if chunk.decode() :
78
  chunk = chunk.decode()
79
- # decode each line as response data is in bytes
80
  if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
81
  partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
82
  if token_counter == 0:
@@ -87,58 +86,49 @@ def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], hi
87
  token_counter+=1
88
  yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history}
89
 
90
- #Resetting to blank
91
  def reset_textbox():
92
  return gr.update(value='')
93
 
94
- #to set a component as visible=False
95
  def set_visible_false():
96
  return gr.update(visible=False)
97
 
98
- #to set a component as visible=True
99
  def set_visible_true():
100
  return gr.update(visible=False)
101
 
102
- #title = """<h1 align="center">🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming</h1>"""
103
 
104
- #display message for themes feature
 
105
  theme_addon_msg = ""
106
 
107
- #Using info to add additional information about System message in GPT4
108
  system_msg_info = ""
109
 
110
- #Modifying existing Gradio Theme
111
  theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="blue",
112
- text_size=gr.themes.sizes.text_lg)
113
 
114
  with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 360px; overflow: auto;}""",
115
  theme=theme) as demo:
116
 
117
  with gr.Column(elem_id = "col_container"):
118
- #GPT4 API Key is provided by Huggingface
119
  with gr.Accordion("", open=False, visible=False):
120
  system_msg = gr.Textbox(value="")
121
  accordion_msg = gr.HTML(value="", visible=False)
122
  chatbot = gr.Chatbot(label='Trek Asistanı', elem_id="chatbot")
123
  inputs = gr.Textbox(placeholder= "Buraya yazın, yanıtlayalım.", show_label= False)
124
  state = gr.State([])
125
- '''with gr.Row():
126
- with gr.Column(scale=7):
127
- b1 = gr.Button().style(full_width=True)
128
- with gr.Column(scale=3):
129
- server_status_code = gr.Textbox(label="Status code from OpenAI server", )'''
130
-
131
- #top_p, temperature
132
  with gr.Accordion("", open=False, visible=False):
133
  top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=False, visible=False)
134
  temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=False, visible=False)
135
  chat_counter = gr.Number(value=0, visible=False, precision=0)
136
 
137
- #Event handling
138
  inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter],) #openai_api_key
139
- #b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
140
-
141
- #b1.click(reset_textbox, [], [inputs])
142
  inputs.submit(reset_textbox, [], [inputs])
143
 
144
  demo.queue(max_size=20, concurrency_count=20).launch(debug=True)
 
3
  import json
4
  import requests
5
 
6
+
7
  API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
8
 
 
9
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
10
 
11
+
12
  def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
13
+
14
  headers = {
15
  "Content-Type": "application/json",
16
  "Authorization": f"Bearer {OPENAI_API_KEY}"
 
45
  temp["role"] = "user"
46
  temp["content"] = inputs
47
  messages.append(temp)
48
+
49
  payload = {
50
  "model": "gpt-3.5-turbo",
51
  "messages": messages,
 
60
 
61
  history.append(inputs)
62
  print(f"Logging : payload is - {payload}")
63
+
64
  response = requests.post(API_URL, headers=headers, json=payload, stream=True)
65
  print(f"Logging : response code - {response}")
66
  token_counter = 0
 
68
 
69
  counter=0
70
  for chunk in response.iter_lines():
71
+
72
  if counter == 0:
73
  counter+=1
74
  continue
75
+
76
  if chunk.decode() :
77
  chunk = chunk.decode()
78
+
79
  if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
80
  partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
81
  if token_counter == 0:
 
86
  token_counter+=1
87
  yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history}
88
 
89
+
90
  def reset_textbox():
91
  return gr.update(value='')
92
 
93
+
94
  def set_visible_false():
95
  return gr.update(visible=False)
96
 
97
+
98
  def set_visible_true():
99
  return gr.update(visible=False)
100
 
 
101
 
102
+
103
+
104
  theme_addon_msg = ""
105
 
 
106
  system_msg_info = ""
107
 
108
+
109
  theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="blue",
110
+ text_size=gr.themes.sizes.text_md)
111
 
112
  with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 360px; overflow: auto;}""",
113
  theme=theme) as demo:
114
 
115
  with gr.Column(elem_id = "col_container"):
116
+
117
  with gr.Accordion("", open=False, visible=False):
118
  system_msg = gr.Textbox(value="")
119
  accordion_msg = gr.HTML(value="", visible=False)
120
  chatbot = gr.Chatbot(label='Trek Asistanı', elem_id="chatbot")
121
  inputs = gr.Textbox(placeholder= "Buraya yazın, yanıtlayalım.", show_label= False)
122
  state = gr.State([])
123
+
124
+
 
 
 
 
 
125
  with gr.Accordion("", open=False, visible=False):
126
  top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=False, visible=False)
127
  temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=False, visible=False)
128
  chat_counter = gr.Number(value=0, visible=False, precision=0)
129
 
130
+
131
  inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter],) #openai_api_key
 
 
 
132
  inputs.submit(reset_textbox, [], [inputs])
133
 
134
  demo.queue(max_size=20, concurrency_count=20).launch(debug=True)