bilgeyucel committed on
Commit 3de0eaa
1 Parent(s): 09b81f2

add more input

Files changed (1)
  1. app.py +26 -9
app.py CHANGED
@@ -3,22 +3,39 @@ from haystack.nodes import PromptNode
 
 from utils import lemmatizer_func
 
-def run_prompt(prompt, api_key):
-    prompt_node = PromptNode(model_name_or_path="gpt-3.5-turbo", api_key=api_key)
+def run_prompt(prompt, api_key, model_name, max_length):
+    prompt_node = PromptNode(model_name_or_path=model_name, api_key=api_key, max_length=max_length)
     lemmatized_prompt = lemmatizer_func(prompt)
     response_plain = prompt_node(prompt)
     response_lemmatized = prompt_node(lemmatized_prompt)
     return response_plain[0][0], response_plain[1]["total_tokens"], response_lemmatized[0][0], response_lemmatized[1]["total_tokens"]
 
 with gr.Blocks() as demo:
-    api_key = gr.Textbox(label="Enter your api key")
-    prompt = gr.Textbox(label="Prompt", value="Rachel has 17 apples. She gives 9 to Sarah. How many apples does Rachel have now?")
+    with gr.Row():
+        api_key = gr.Textbox(label="Enter your api key")
+        model_name = gr.Dropdown(["text-davinci-003", "gpt-3.5-turbo", "gpt-4", "gpt-4-32k", "command", "command-light", "base", "base-light"], value="gpt-3.5-turbo", label="Choose your model!")
+    with gr.Row():
+        prompt = gr.TextArea(label="Prompt", value="Rachel has 17 apples. She gives 9 to Sarah. How many apples does Rachel have now?")
+        gr.Examples(
+            [
+                ["I want you to act as a travel guide. I will write you my location and you will suggest a place to visit near my location. In some cases, I will also give you the type of places I will visit. You will also suggest me places of similar type that are close to my first location. My first suggestion request is \"I am in Italy and I want to visit only museums.\""],
+                ["What's the Everett interpretation of quantum mechanics?"],
+                ["Give me a list of the top 10 dive sites you would recommend around the world."],
+                ["Can you tell me more about deep-water soloing?"],
+                ["Can you write a short tweet about the Apache 2.0 release of our latest AI model, Falcon LLM?"],
+            ],
+            inputs=prompt,
+            label="Click on any example and press Enter in the input textbox!",
+        )
+    max_length = gr.Slider(100, 500, value=100, step=10, label="Max Length", info="Choose between 100 and 500")
     submit_btn = gr.Button("Submit")
-    token_count_plain = gr.Number(label="Plain Text Token Count")
-    token_count_lemmatized = gr.Number(label="Lemmatized Text Token Count")
-    prompt_response = gr.Textbox(label="Answer", show_copy_button=True)
-    lemmatized_prompt_response = gr.Textbox(label="Lemm Answer", show_copy_button=True)
-    submit_btn.click(fn=run_prompt, inputs=[prompt, api_key], outputs=[prompt_response, token_count_plain, lemmatized_prompt_response, token_count_lemmatized])
+    with gr.Row():
+        prompt_response = gr.TextArea(label="Answer", show_copy_button=True)
+        token_count_plain = gr.Number(label="Plain Text Token Count")
+    with gr.Row():
+        lemmatized_prompt_response = gr.TextArea(label="Lemmatized Answer", show_copy_button=True)
+        token_count_lemmatized = gr.Number(label="Lemmatized Text Token Count")
+    submit_btn.click(fn=run_prompt, inputs=[prompt, api_key, model_name, max_length], outputs=[prompt_response, token_count_plain, lemmatized_prompt_response, token_count_lemmatized])
 
 demo.launch()
 
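
For context, lemmatizer_func is imported from utils and is not touched by this commit. Below is a minimal sketch of what such a helper might look like, assuming an NLTK-based implementation; the real utils.py may use a different library or normalization strategy.

# Hypothetical sketch of utils.py -- the actual lemmatizer_func is not shown in this commit.
import nltk
from nltk.stem import WordNetLemmatizer

nltk.download("punkt", quiet=True)
nltk.download("wordnet", quiet=True)

_lemmatizer = WordNetLemmatizer()

def lemmatizer_func(text: str) -> str:
    # Tokenize the prompt, lemmatize each token, and rejoin into one string.
    # app.py then sends both the plain and the lemmatized prompt to the
    # PromptNode and compares the reported token counts.
    tokens = nltk.word_tokenize(text)
    return " ".join(_lemmatizer.lemmatize(token) for token in tokens)

With the diff applied, running python app.py starts the Gradio demo via demo.launch(); the Submit button calls run_prompt with the prompt, API key, selected model, and max length, and fills the two answer boxes and their token counts.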