Yuntian Deng committed on
Commit
eb3516b
0 Parent(s):

Duplicate from yuntian-deng/ChatGPT4

Browse files
Files changed (3) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +188 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Chat-with-GPT4
3
+ emoji: 🚀
4
+ colorFrom: red
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 3.21.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ duplicated_from: yuntian-deng/ChatGPT4
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import sys
4
+ import json
5
+ import requests
6
+
7
+ #Streaming endpoint
8
+ API_URL = os.getenv("API_URL")
9
+ DISABLED = os.getenv("DISABLED") == 'True'
10
+
11
+ #Testing with my Open AI Key
12
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
13
+
14
+ #Supress errors
15
+ def exception_handler(exception_type, exception, traceback):
16
+ print("%s: %s" % (exception_type.__name__, exception))
17
+ sys.excepthook = exception_handler
18
+ sys.tracebacklimit = 0
19
+
20
+ #https://github.com/gradio-app/gradio/issues/3531#issuecomment-1484029099
21
+ def parse_codeblock(text):
22
+ lines = text.split("\n")
23
+ for i, line in enumerate(lines):
24
+ if "```" in line:
25
+ if line != "```":
26
+ lines[i] = f'<pre><code class="{lines[i][3:]}">'
27
+ else:
28
+ lines[i] = '</code></pre>'
29
+ else:
30
+ if i > 0:
31
+ lines[i] = "<br/>" + line.replace("<", "&lt;").replace(">", "&gt;")
32
+ return "".join(lines)
33
+
34
def predict(inputs, top_p, temperature, chat_counter, chatbot=None, history=None):
    """Stream a GPT-4 chat completion and yield incremental chatbot updates.

    Args:
        inputs: the new user message.
        top_p: nucleus-sampling value forwarded to the API.
        temperature: sampling temperature forwarded to the API.
        chat_counter: number of completed turns so far.
        chatbot: current Chatbot display pairs (kept for the Gradio callback
            signature; not read here).
        history: flat list of alternating user/assistant utterances,
            mutated in place as tokens arrive.

    Yields:
        (chat, history, chat_counter, response) tuples as the stream advances.

    Raises:
        Exception: when the API does not answer with HTTP 200.
    """
    # Bug fix: mutable default arguments ([]) are shared across calls;
    # use None sentinels and allocate per call instead.
    if chatbot is None:
        chatbot = []
    if history is None:
        history = []

    # Rebuild the conversation for the API: history alternates
    # user/assistant turns, followed by the new user message.
    messages = []
    for i, data in enumerate(history):
        role = 'user' if i % 2 == 0 else 'assistant'
        messages.append({"role": role, "content": data})
    messages.append({"role": "user", "content": inputs})

    # Bug fix: the first turn previously hard-coded temperature/top_p to 1.0
    # and ignored the UI sliders; every turn now honors them.
    payload = {
        "model": "gpt-4",
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}"
    }

    chat_counter += 1
    history.append(inputs)

    # POST with stream=True so completion tokens arrive as SSE lines.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    # Bug fix: compare the numeric status code rather than the string
    # repr "<Response [200]>".
    if response.status_code != 200:
        raise Exception(f"Sorry, hitting rate limit. Please try again later. {response}")

    token_counter = 0
    partial_words = ""
    counter = 0
    for chunk in response.iter_lines():
        # Skip the first chunk — it carries no delta content.
        if counter == 0:
            counter += 1
            continue
        # Non-empty lines look like "data: {json}"; strip the 6-char prefix.
        if chunk.decode():
            chunk = chunk.decode()
            if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
                partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                # Pair up (user, assistant) turns for the Chatbot widget.
                chat = [(parse_codeblock(history[i]), parse_codeblock(history[i + 1])) for i in range(0, len(history) - 1, 2)]
                token_counter += 1
                yield chat, history, chat_counter, response
    print(json.dumps({"chat_counter": chat_counter, "payload": payload, "partial_words": partial_words, "token_counter": token_counter, "counter": counter}))
114
+
115
+
116
def reset_textbox():
    # Clear the input textbox after a message is submitted.
    cleared = gr.update(value='')
    return cleared
118
+
119
# --- UI copy ------------------------------------------------------------
title = """<h1 align="center">🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming</h1>"""
if DISABLED:
    title = """<h1 align="center" style="color:red">This app has reached OpenAI's usage limit. We are currently requesting an increase in our quota. Please check back in a few days.</h1>"""
description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of a gpt-4 LLM.
"""

theme = gr.themes.Default(primary_hue="green")

# --- Layout -------------------------------------------------------------
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    # Bug fix: this heading opened with <h3> but closed with </h1>;
    # the closing tag now matches the opening one.
    gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌</h3>""")
    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')

    # Main chat UI; hidden until the user accepts the consent form below.
    with gr.Column(elem_id = "col_container", visible=False) as main_block:
        #GPT4 API Key is provided by Huggingface
        #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here")
        chatbot = gr.Chatbot(elem_id='chatbot') #c
        inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t
        state = gr.State([]) #s
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button(visible=not DISABLED).style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server", )

        #inputs, top_p, temperature, top_k, repetition_penalty
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
            #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
            #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
        # Hidden turn counter threaded through predict() as extra state.
        chat_counter = gr.Number(value=0, visible=False, precision=0)

    # Consent form shown first; accepting it reveals the chat UI.
    with gr.Column(elem_id = "user_consent_container") as user_consent_block:
        # Get user consent
        with gr.Accordion("User Consent for Data Collection, Use, and Sharing", open=True):
            gr.HTML("""
            <div>
                <p>By using our app, which is powered by OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:</p>
                <ol>
                    <li><strong>Collection:</strong> We may collect information, including the inputs you type into our app and the outputs generated by OpenAI's API.</li>
                    <li><strong>Use:</strong> We may use the collected data for research purposes, to improve our services, and to develop new products or services, including commercial applications.</li>
                    <li><strong>Sharing and Publication:</strong> Your data may be published, shared with third parties, or used for analysis and reporting purposes.</li>
                    <li><strong>Data Retention:</strong> We may retain your data for as long as necessary.</li>
                </ol>
                <p>By continuing to use our app, you provide your explicit consent to the collection, use, and potential sharing of your data as described above. If you do not agree with our data collection, use, and sharing practices, please do not use our app.</p>
            </div>
            """)
        accept_button = gr.Button("I Agree")

    def enable_inputs():
        # Swap visibility: hide the consent form, reveal the chat UI.
        return user_consent_block.update(visible=False), main_block.update(visible=True)

    accept_button.click(fn=enable_inputs, inputs=[], outputs=[user_consent_block, main_block])

    # Both Enter and the button trigger a (streaming) predict call,
    # then clear the textbox.
    inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
    b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])

demo.queue(max_size=20, concurrency_count=10).launch()