Spaces:
Sleeping
Sleeping
oliveirabruno01
committed on
Commit
•
b14a8ff
1
Parent(s):
c46c78f
Add model input
Browse files
app.py
CHANGED
@@ -14,7 +14,7 @@ previous_answer = ""
|
|
14 |
is_clearing = False
|
15 |
|
16 |
|
17 |
-
def ai_response(api_key, base_url, input_text, shared_text, temperature):
|
18 |
global previous_thought
|
19 |
|
20 |
in_context_learning = [*prompts.continue_skill_example, *prompts.boilerplate_example, *prompts.continue_complete_skill_example,
|
@@ -45,7 +45,7 @@ def ai_response(api_key, base_url, input_text, shared_text, temperature):
|
|
45 |
return client.chat.completions.create(**kwargs)
|
46 |
|
47 |
stream = completion_with_backoff(
|
48 |
-
model=
|
49 |
temperature=temperature,
|
50 |
messages=messages,
|
51 |
response_format={"type": "json_object"},
|
@@ -78,6 +78,7 @@ def ai_response(api_key, base_url, input_text, shared_text, temperature):
|
|
78 |
with gr.Blocks() as demo:
|
79 |
api_input = gr.Textbox(label="Your OpenAI API key", type="password")
|
80 |
base_url = gr.Textbox(label="OpenAI API base URL", value="https://openai-proxy.replicate.com/v1")
|
|
|
81 |
|
82 |
user_input = gr.Textbox(lines=2, label="User Input")
|
83 |
cot_textbox = gr.Textbox(label="CoT etc.")
|
@@ -85,7 +86,7 @@ with gr.Blocks() as demo:
|
|
85 |
temperature = gr.Slider(label="Temperature", minimum=0, maximum=2, step=0.01, value=0.01)
|
86 |
# n_shots = gr.Slider(label="N-shots (~150 tokens each. It should not work 0-shot)", minimum=0, maximum=5, step=1, value=1)
|
87 |
ai_btn = gr.Button("Generate AI Response")
|
88 |
-
generation = ai_btn.click(fn=ai_response, inputs=[api_input, base_url, user_input, shared_textbox, temperature],
|
89 |
outputs=[shared_textbox, cot_textbox])
|
90 |
|
91 |
|
|
|
14 |
is_clearing = False
|
15 |
|
16 |
|
17 |
+
def ai_response(api_key, base_url, base_model, input_text, shared_text, temperature):
|
18 |
global previous_thought
|
19 |
|
20 |
in_context_learning = [*prompts.continue_skill_example, *prompts.boilerplate_example, *prompts.continue_complete_skill_example,
|
|
|
45 |
return client.chat.completions.create(**kwargs)
|
46 |
|
47 |
stream = completion_with_backoff(
|
48 |
+
model=base_model,
|
49 |
temperature=temperature,
|
50 |
messages=messages,
|
51 |
response_format={"type": "json_object"},
|
|
|
78 |
with gr.Blocks() as demo:
|
79 |
api_input = gr.Textbox(label="Your OpenAI API key", type="password")
|
80 |
base_url = gr.Textbox(label="OpenAI API base URL", value="https://openai-proxy.replicate.com/v1")
|
81 |
+
base_model = gr.Textbox(label="Model", value="meta/llama-2-70b-chat")
|
82 |
|
83 |
user_input = gr.Textbox(lines=2, label="User Input")
|
84 |
cot_textbox = gr.Textbox(label="CoT etc.")
|
|
|
86 |
temperature = gr.Slider(label="Temperature", minimum=0, maximum=2, step=0.01, value=0.01)
|
87 |
# n_shots = gr.Slider(label="N-shots (~150 tokens each. It should not work 0-shot)", minimum=0, maximum=5, step=1, value=1)
|
88 |
ai_btn = gr.Button("Generate AI Response")
|
89 |
+
generation = ai_btn.click(fn=ai_response, inputs=[api_input, base_url, base_model, user_input, shared_textbox, temperature],
|
90 |
outputs=[shared_textbox, cot_textbox])
|
91 |
|
92 |
|