ysharma HF staff commited on
Commit
bc5816a
1 Parent(s): 0b47e05

create app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -0
app.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import json
4
+ import requests
5
+
6
# OpenAI streaming chat-completions endpoint (SSE).
API_URL = "https://api.openai.com/v1/chat/completions"  # alt: os.getenv("API_URL") + "/generate_stream"

# API key from the environment; the UI textbox value overrides this per-request.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
11
+
12
def predict(inputs, top_p, temperature, openai_api_key, history=None):
    """Stream a chat completion from the OpenAI API.

    Sends *inputs* as a single user message to gpt-3.5-turbo with
    ``stream=True`` and yields progressively longer results so Gradio
    can render the reply token by token.

    Parameters:
        inputs: the user's message text.
        top_p: nucleus-sampling value (from the UI slider).
        temperature: sampling temperature (from the UI slider).
        openai_api_key: API key sent as a Bearer token.
        history: flat list of alternating [user, bot, user, bot, ...]
            utterances, mutated in place. Defaults to a fresh list.

    Yields:
        (chat, history) — ``chat`` is a list of (user, bot) tuples for
        gr.Chatbot; ``history`` is the updated flat list for gr.State.
    """
    # Bug fix: `history=[]` as a default is shared across calls; use None.
    if history is None:
        history = []

    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": f"{inputs}"}],
        # Bug fix: the slider values were previously ignored — the request
        # always hardcoded temperature=1.0 and top_p=1.0.
        "temperature": float(temperature),
        "top_p": float(top_p),
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }

    history.append(inputs)
    # stream=True keeps the connection open; iter_lines() yields SSE lines.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)

    token_counter = 0
    partial_words = ""
    counter = 0
    for chunk in response.iter_lines():
        # Skip the first streamed line: it carries only the role delta,
        # never any content.
        if counter == 0:
            counter += 1
            continue
        counter += 1
        # Blank lines are SSE keep-alive separators — ignore them.
        if not chunk:
            continue
        # Each data line looks like b'data: {...}'; strip the 6-byte
        # "data: " prefix. Parse once per chunk (the original parsed the
        # same JSON up to three times).
        delta = json.loads(chunk.decode()[6:])['choices'][0]["delta"]
        if len(delta) == 0:
            # An empty delta marks the end of the stream.
            break
        partial_words = partial_words + delta["content"]
        if token_counter == 0:
            history.append(" " + partial_words)
        else:
            history[-1] = partial_words
        # Pair up [user, bot, user, bot, ...] into (user, bot) tuples.
        chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
        token_counter += 1
        yield chat, history  # resembles {chatbot: chat, state: history}
58
+
59
+
60
def reset_textbox():
    """Return a Gradio update that clears the input textbox."""
    return gr.update(value='')
62
+
63
# Page header and intro markdown shown in the UI (strings unchanged).
title = """<h1 align="center">🔥ChatGPT API 🚀Streaming🚀</h1>"""

description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of a 20B large language model.
"""
# Duplicate-Space badge markup lives inline in the gr.HTML call below.
75
+
76
# Assemble the Gradio UI: header, duplicate-space badge, a single column
# holding the API-key box, chatbot, input box, state, send button, and a
# collapsed accordion of sampling sliders; then wire both the Enter key
# and the button to the streaming predict() generator.
with gr.Blocks(css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;}
#chatbot {height: 400px; overflow: auto;}""") as demo:
    gr.HTML(title)
    gr.HTML()
    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPTwithAPI?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')

    with gr.Column(elem_id="col_container"):
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
        chatbot = gr.Chatbot(elem_id='chatbot')
        inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter")
        state = gr.State([])  # flat [user, bot, user, bot, ...] history
        b1 = gr.Button()

        # Sampling controls passed straight through to predict().
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider(minimum=-0, maximum=1.0, value=0.95, step=0.05, interactive=True, label="Top-p (nucleus sampling)")
            temperature = gr.Slider(minimum=-0, maximum=5.0, value=0.5, step=0.1, interactive=True, label="Temperature")

    # Enter key and button trigger the same streaming handler; both also
    # clear the textbox afterwards.
    inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, state], [chatbot, state])
    b1.click(predict, [inputs, top_p, temperature, openai_api_key, state], [chatbot, state])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])

demo.queue().launch(debug=True)