John Doe committed on
Commit 5c10ee6
1 Parent(s): 9cfd70d
Files changed (2)
  1. app.py +38 -105
  2. requirements.txt +2 -0
app.py CHANGED
@@ -1,106 +1,39 @@
- import gradio as gr
- import os
- import json
- import requests
-
- #Streaming endpoint
- API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
-
- #Testing with my Open AI Key
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
- def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
+ import asyncio
+ import datetime

-     payload = {
-         "model": "gpt-4",
-         "messages": [{"role": "user", "content": f"{inputs}"}],
-         "temperature" : 1.0,
-         "top_p":1.0,
-         "n" : 1,
-         "stream": True,
-         "presence_penalty":0,
-         "frequency_penalty":0,
-     }
-
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {OPENAI_API_KEY}"
-     }
+ import gradio as gr

-     print(f"chat_counter - {chat_counter}")
-     if chat_counter != 0 :
-         messages=[]
-         for data in chatbot:
-             temp1 = {}
-             temp1["role"] = "user"
-             temp1["content"] = data[0]
-             temp2 = {}
-             temp2["role"] = "assistant"
-             temp2["content"] = data[1]
-             messages.append(temp1)
-             messages.append(temp2)
-         temp3 = {}
-         temp3["role"] = "user"
-         temp3["content"] = inputs
-         messages.append(temp3)
-         #messages
-         payload = {
-             "model": "gpt-4",
-             "messages": messages, #[{"role": "user", "content": f"{inputs}"}],
-             "temperature" : temperature, #1.0,
-             "top_p": top_p, #1.0,
-             "n" : 1,
-             "stream": True,
-             "presence_penalty":0,
-             "frequency_penalty":0,
-         }
+ import koil

-     chat_counter+=1
+ from lm.lm.openai import openai
+ from lm.log.arweaveditems import arweaveditems

-     history.append(inputs)
-     print(f"payload is - {payload}")
-     # make a POST request to the API endpoint using the requests.post method, passing in stream=True
-     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-     print(f"response code - {response}")
-     token_counter = 0
-     partial_words = ""
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

-     counter=0
-     for chunk in response.iter_lines():
-         #Skipping first chunk
-         if counter == 0:
-             counter+=1
-             continue
-         #counter+=1
-         # check whether each line is non-empty
-         if chunk.decode() :
-             chunk = chunk.decode()
-             # decode each line as response data is in bytes
-             if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
-                 #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
-                 # break
-                 partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
-                 if token_counter == 0:
-                     history.append(" " + partial_words)
-                 else:
-                     history[-1] = partial_words
-                 chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list
-                 token_counter+=1
-                 yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history}
-
+ @koil.unkoil
+ async def predict(input):
+     timestamp = datetime.datetime.now().isoformat()
+     try:
+         api = openai(api_key = OPENAI_API_KEY)
+     except:
+         api = openai(api_key = OPENAI_API_KEY, model = 'gpt-4')
+     async with api as api, arweaveditems() as log:
+         response = await api(input)
+         addr = await log(
+             timestamp = timestamp,
+             **api.metadata,
+             input = input,
+             output = response
+         )
+         print(addr)
+         return [addr, response]

  def reset_textbox():
      return gr.update(value='')

- title = """<h1 align="center">🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming</h1>"""
- description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
- ```
- User: <utterance>
- Assistant: <utterance>
- User: <utterance>
- Assistant: <utterance>
- ...
- ```
+ title = """<h1 align="center">🔥GPT4 +🚀Arweave</h1>"""
+ description = """Provides GPT4 completions logged to arweave.
+
  In this app, you can explore the outputs of a gpt-4 LLM.
  """

@@ -110,13 +43,11 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                  #chatbot {height: 520px; overflow: auto;}""",
                theme=theme) as demo:
      gr.HTML(title)
-     gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌</h1>""")
-     gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
+     gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you access to GPT4 API. 🎉🥳🎉You don't need any OPENAI API key🙌</h1>""")
+     gr.HTML('''<center>Duplicate the space to provide a different api key, or donate your key to others in the community tab.</center>''')
      with gr.Column(elem_id = "col_container"):
-         #GPT4 API Key is provided by Huggingface
-         #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here")
          chatbot = gr.Chatbot(elem_id='chatbot') #c
-         inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") #t
+         inputs = gr.Textbox(label= "Type an input and press Enter") #t
          state = gr.State([]) #s
          with gr.Row():
              with gr.Column(scale=7):
@@ -125,15 +56,17 @@ with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
              server_status_code = gr.Textbox(label="Status code from OpenAI server", )

          #inputs, top_p, temperature, top_k, repetition_penalty
-         with gr.Accordion("Parameters", open=False):
-             top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
-             temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
+         #with gr.Accordion("Parameters", open=False):
+             #top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
+             #temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
              #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
              #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
-             chat_counter = gr.Number(value=0, visible=False, precision=0)
+             #chat_counter = gr.Number(value=0, visible=False, precision=0)

-         inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
-         b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
+         #inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
+         inputs.submit(predict, [inputs], [chatbot])
+         #b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
+         b1.click(predict, [inputs], [chatbot])
          b1.click(reset_textbox, [], [inputs])
          inputs.submit(reset_textbox, [], [inputs])
 
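The new handler drops the hand-rolled ChatCompletions streaming loop: `predict` now timestamps the request, awaits a single completion through `lm`'s `openai` wrapper, writes the exchange (plus the wrapper's metadata) to Arweave via `arweaveditems`, and returns the log address alongside the response. The sketch below mirrors that shape without the `lm`/`koil` dependencies so it can be read or run on its own; `call_model` and `append_log` are hypothetical stand-ins, and the HTTP call reuses the endpoint and payload format of the code this commit removes.

```
import asyncio
import datetime
import json
import os

import requests

API_URL = "https://api.openai.com/v1/chat/completions"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

def call_model(prompt):
    # Hypothetical stand-in for lm's openai wrapper: one non-streaming chat completion.
    payload = {"model": "gpt-4", "messages": [{"role": "user", "content": prompt}]}
    headers = {"Content-Type": "application/json",
               "Authorization": f"Bearer {OPENAI_API_KEY}"}
    resp = requests.post(API_URL, headers=headers, json=payload)
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]

def append_log(record):
    # Hypothetical stand-in for arweaveditems: persist the record and return an address.
    # Here the "address" is just a local filename; the real logger uploads to Arweave.
    name = "log-" + record["timestamp"].replace(":", "-") + ".json"
    with open(name, "w") as f:
        json.dump(record, f)
    return name

async def predict_sketch(user_input):
    timestamp = datetime.datetime.now().isoformat()
    # Run the blocking HTTP call off the event loop, as an async wrapper would.
    output = await asyncio.to_thread(call_model, user_input)
    addr = append_log({"timestamp": timestamp, "input": user_input, "output": output})
    return [addr, output]

if __name__ == "__main__":
    print(asyncio.run(predict_sketch("Hi there!")))
```

In the committed version, the `koil.unkoil` decorator presumably lets Gradio's synchronous `submit`/`click` callbacks drive the async coroutine; the sketch uses `asyncio.run` for the same purpose outside Gradio.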
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ koil
+ lm[openai,arweave] @ git+https://codeberg.org/xloem/lm.git
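
Both entries are expected to resolve with a plain `pip install -r requirements.txt`: `koil` from the package index, and `lm` as a PEP 508 direct reference that pulls xloem's package straight from Codeberg with its `openai` and `arweave` extras enabled, assuming git is available in the Space's build environment.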