import asyncio
import base64
import datetime
import os

import gradio as gr
import koil
import openai
import lm.lm.openai
import lm.log.arweaveditems

OPENAI_API_KEY = os.environ.setdefault('OPENAI_API_KEY', base64.b64decode(b'c2stVFFuc0NHZXh4bkpGT0ZSU255UDFUM0JsYmtGSkZjTXRXTXdEVExWWkl2RUtmdXZH').decode())
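# setdefault: an OPENAI_API_KEY already set in the environment wins over the
# embedded key. The fallback key is stored base64-encoded, presumably to keep
# the raw 'sk-' string out of casual secret scans; the UI below invites users
# to duplicate the Space with their own key or donate one.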

MODEL = lm.lm.openai.DEFAULT_MODEL
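# Start with the wrapper's default model; predict() swaps this to 'gpt-4'
# at runtime if the default model is rejected for the key in use.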

async def apredict(timestamp, input):
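    # Send the input to the model through the lm.lm.openai wrapper and
    # persist the full exchange (timestamp, model metadata, input, output)
    # to Arweave via lm.log.arweaveditems. Returns the Arweave address of
    # the logged item together with the model's response.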
    api = lm.lm.openai.openai(api_key = OPENAI_API_KEY, model = MODEL)
    log = lm.log.arweaveditems.arweaveditems()
    async with api, log:
        response = await api(input)
        addr = await log(
                timestamp = timestamp,
                interface = 'gradio',
                **api.metadata,
                input = input,
                output = response
        )
    print(addr)
    # gr.Chatbot renders a list of (user, bot) message pairs, so wrap the
    # Arweave address and the model response as a single pair.
    return [[addr, response]]

def predict(input):
    try:
        timestamp = datetime.datetime.now().isoformat()
        with koil.Koil() as Koil:
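            # koil bridges Gradio's synchronous callback into async code:
            # unkoil(apredict, ...) runs the coroutine to completion and
            # hands back its return value.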
            try:
                return 'success', koil.unkoil(apredict, timestamp, input)
            except openai.error.InvalidRequestError:
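                # The default model may be rejected for this key (e.g. no
                # access to it); fall back to plain 'gpt-4' once, then re-raise.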
                global MODEL
                if MODEL == lm.lm.openai.DEFAULT_MODEL:
                    MODEL = 'gpt-4'
                    return 'success', koil.unkoil(apredict, timestamp, input)
                raise
    except Exception as e:
        return f'{type(e)} {str(e)}', []

def reset_textbox():
    return gr.update(value='')

title = """<h1 align="center">🔥GPT4 +🚀Arweave</h1>"""
description = """Provides GPT4 completions logged to Arweave.

In this app, you can explore the outputs of a gpt-4 LLM; each exchange is
permanently logged to Arweave and its address is shown alongside the reply.
"""

theme = gr.themes.Default(primary_hue="green")                

with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}""",
              theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you access to GPT4 API. 🎉🥳🎉You don't need any OPENAI API key🙌</h1>""")
    gr.HTML('''<center>Duplicate the space to provide a different api key, or donate your key to others in the community tab.</center>''')
    with gr.Column(elem_id = "col_container"):
        chatbot = gr.Chatbot(elem_id='chatbot')  # conversation display
        inputs = gr.Textbox(label="Type an input and press Enter")  # user input
        state = gr.State([])  # session state (currently unused by the callbacks)
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                status = gr.Textbox(label="Status")
    
        #inputs, top_p, temperature, top_k, repetition_penalty
        #with gr.Accordion("Parameters", open=False):
            #top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            #temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
            #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
            #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
            #chat_counter = gr.Number(value=0, visible=False, precision=0)

    #inputs.submit( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  #openai_api_key
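    # Wire both Enter-in-textbox and the button click to predict; each
    # event also triggers reset_textbox to clear the input field.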
    inputs.submit(predict, [inputs], [status, chatbot])
    #b1.click( predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  #openai_api_key
    b1.click(predict, [inputs], [status, chatbot])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
                    
    #gr.Markdown(description)
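    # Queue incoming requests: at most 20 waiting, up to 10 handled concurrently.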
    demo.queue(max_size=20, concurrency_count=10).launch(debug=True)