import asyncio
import datetime
import os

import gradio as gr
import koil
from lm.lm.openai import openai
from lm.log.arweaveditems import arweaveditems

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
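# The key is read from the environment; on Hugging Face Spaces it would typically be
# configured as a repository secret rather than hard-coded in the app file.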
async def predict(input):
    timestamp = datetime.datetime.now().isoformat()
    try:
        api = openai(api_key=OPENAI_API_KEY)
    except Exception:
        # fall back to requesting gpt-4 explicitly if the default constructor fails
        api = openai(api_key=OPENAI_API_KEY, model='gpt-4')
    async with api as api, arweaveditems() as log:
        response = await api(input)
        # log the exchange to arweave and keep the returned permanent address
        addr = await log(
            timestamp=timestamp,
            interface='gradio',
            **api.metadata,
            input=input,
            output=response,
        )
        print(addr)
        # gr.Chatbot expects a list of (user, assistant) pairs
        return [(input, f"{response}\n\narweave: {addr}")]
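# Usage sketch (assumes the lm and arweaveditems packages are installed and configured):
# calling `asyncio.run(predict("Hello"))` outside Gradio would print the arweave address
# of the logged exchange and return the single (user, assistant) chat pair.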
def reset_textbox():
    return gr.update(value='')
title = """<h1 align="center">🔥GPT4 +🚀Arweave</h1>""" | |
description = """Provides GPT4 completions logged to arweave. | |
In this app, you can explore the outputs of a gpt-4 LLM. | |
""" | |
theme = gr.themes.Default(primary_hue="green")

with gr.Blocks(css="""#col_container { margin-left: auto; margin-right: auto;}
                      #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you access to the GPT4 API. 🎉🥳🎉You don't need any OPENAI API key🙌</h3>""")
    gr.HTML('''<center>Duplicate the space to provide a different api key, or donate your key to others in the community tab.</center>''')
    with gr.Column(elem_id="col_container"):
        chatbot = gr.Chatbot(elem_id='chatbot')
        inputs = gr.Textbox(label="Type an input and press Enter")
        state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button("Run").style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server")
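        # Note: server_status_code is a leftover from the upstream demo and is not
        # currently wired to predict's outputs; only the chatbot receives updates.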
        #inputs, top_p, temperature, top_k, repetition_penalty
        #with gr.Accordion("Parameters", open=False):
        #top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
        #temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
        #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
        #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
        #chat_counter = gr.Number(value=0, visible=False, precision=0)

    #inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
    inputs.submit(predict, [inputs], [chatbot])
    #b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key
    b1.click(predict, [inputs], [chatbot])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
    #gr.Markdown(description)

demo.queue(max_size=20, concurrency_count=10).launch(debug=True)
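# Launch notes (an assumption about Gradio 3.x queue semantics, not from the original
# file): queue() here lets up to 10 requests be processed concurrently and caps the
# waiting queue at 20; launch(debug=True) keeps the process attached and prints
# tracebacks, which is convenient when the app runs as a Space.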