research14 committed on
Commit 956b24f
1 parent: df2a22b
Files changed (2)
  1. app.py +68 -51
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,58 +1,75 @@
  import gradio as gr
 
- theme = gr.themes.Soft()
 
- query_examples=[
-     ("Default Option 1"),
-     ("Default Option 2"),
-     ("Default Option 3")
- ]
 
- # Define a function that echoes the input text
- def echo_text(input_text):
-     return input_text
 
- with gr.Interface(
-     fn=echo_text,
-     inputs=gr.Textbox(placeholder="Enter text here..."),
      outputs=gr.Textbox(),
-     examples=query_examples,
      live=True,
-     title="LLM Evaluator with Linguistic Scrutiny",
-     theme=theme
- ) as iface:
-     blocks = gr.Blocks()
-
-     with gr.Row():
-         vicuna_model_selector = gr.Dropdown(["7b", "13b", "33b"], label="Vicuna Model", placeholder="Select model size")
-         llama_model_selector = gr.Dropdown(["7B", "13B", "30B", "65B"], label="LLaMa Model", placeholder="Select model size")
-         chatgpt_api_key = gr.Textbox(label="ChatGPT API Key", type="password", placeholder="Enter your API key")
-
-     # Strategy 1 - QA-Based Prompting
-     with gr.Accordion("Strategy 1 - QA-Based Prompting", style="font-weight: bold; font-size: 16px;"):
-         with gr.Row():
-             chatgpt_btn = gr.Button("ChatGPT")
-             llama_btn = gr.Button("LLaMA")
-             vicuna_btn = gr.Button("Vicuna")
-             alpaca_btn = gr.Button("Alpaca")
-             flant5_btn = gr.Button("Flan-T5")
-
-     # Strategy 2 - Instruction-Based Prompting
-     with gr.Accordion("Strategy 2 - Instruction-Based Prompting", style="font-weight: bold; font-size: 16px;"):
-         with gr.Row():
-             chatgpt_btn = gr.Button("ChatGPT")
-             llama_btn = gr.Button("LLaMA")
-             vicuna_btn = gr.Button("Vicuna")
-             alpaca_btn = gr.Button("Alpaca")
-             flant5_btn = gr.Button("Flan-T5")
-
-     # Strategy 3 - Structured Prompting
-     with gr.Accordion("Strategy 3 - Structured Prompting", style="font-weight: bold; font-size: 16px;"):
-         with gr.Row():
-             chatgpt_btn = gr.Button("ChatGPT")
-             llama_btn = gr.Button("LLaMA")
-             vicuna_btn = gr.Button("Vicuna")
-             alpaca_btn = gr.Button("Alpaca")
-             flant5_btn = gr.Button("Flan-T5")
-
- iface.launch()
 
  import gradio as gr
+ from transformers import pipeline
 
+ # Initialize a dictionary of models for each strategy
+ models = {
+     "GPT-2": "gpt2",
+     "ChatGPT": "EleutherAI/gpt-neo-1.3",
+     "LLaMa": "model_for_LLaMa",
+     "Vicuna": "model_for_Vicuna",
+     "Alpaca": "model_for_Alpaca",
+     "Flan-T5": "model_for_Flan-T5",
+ }
 
+ # Define a function to generate text based on the selected model
+ def generate_text(input_instruction, selected_model):
+     if selected_model in models:
+         model_name = models[selected_model]
+         pipe = pipeline("text-generation", model=model_name)
+         generated_text = pipe(input_instruction, max_length=100, do_sample=True)[0]['generated_text']
+         return generated_text
+     else:
+         return "Please select a model for this strategy."
 
+ # Define example instructions for testing
+ instruction_examples = [
+     ("Write a short story about a cat."),
+     ("Explain the concept of artificial intelligence."),
+     ("Compose a poem about nature."),
+ ]
 
+ # Create a Gradio interface
+ iface = gr.Interface(
+     fn=generate_text,
+     inputs=gr.Textbox(placeholder="Enter instruction here..."),
      outputs=gr.Textbox(),
+     examples=instruction_examples,
      live=True,
+     title="Text Generation with Dynamic Model Selection",
+ )
+
+ # Additional input section 1 - User input
+ with gr.Row():
+     user_input = gr.Textbox(placeholder="Enter your input...")
+
+ # Additional input section 2 - Strategy 1
+ with gr.Row():
+     strategy1_selector = gr.Dropdown(list(models.keys()), label="Strategy 1 - QA-Based Prompting")
+
+ # Additional input section 3 - Strategy 2
+ with gr.Row():
+     strategy2_selector = gr.Dropdown(list(models.keys()), label="Strategy 2 - Instruction-Based Prompting")
+
+ # Additional input section 4 - Strategy 3
+ with gr.Row():
+     strategy3_selector = gr.Dropdown(list(models.keys()), label="Strategy 3 - Structured Prompting")
+
+ # Create a callback function for dynamic model selection
+ def update_model_and_generate_text(input_instruction, selected_model):
+     if selected_model:
+         selected_model_name = strategy1_selector.value if selected_model == "Strategy 1 - QA-Based Prompting" else (
+             strategy2_selector.value if selected_model == "Strategy 2 - Instruction-Based Prompting" else
+             strategy3_selector.value)
+         iface.set_function(generate_text, inputs=[user_input, selected_model], outputs="outputs")
+         return "Selected model: " + selected_model_name
+
+ # Add a submit button to trigger dynamic model selection
+ submit_button = gr.Button("Submit")
+
+ # Set the function for the Gradio interface to the update_model_and_generate_text function
+ iface.fn = update_model_and_generate_text
+
+ # Add the submit button to the interface
+ iface.add(submit_button)
+
+ iface.launch()
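Review note (not part of the committed diff): Interface.set_function and Interface.add are not Gradio Interface methods, the gr.Row() blocks above are created outside any gr.Blocks() context (so they never attach to the page), and "EleutherAI/gpt-neo-1.3" appears to be a truncation of the published EleutherAI/gpt-neo-1.3B checkpoint id. The sketch below shows one way the same dynamic-selection idea could be wired with gr.Blocks; it reuses the generate_text logic from this commit, keeps only the two real model ids (the other entries are placeholders), and the names demo, instruction, strategy1, output, and submit are illustrative, not from the commit.

# Sketch only: a gr.Blocks rewrite of the dynamic model selection above.
# Assumptions: only the two publicly available model ids are kept; all
# component and variable names here are illustrative, not from the commit.
import gradio as gr
from transformers import pipeline

models = {
    "GPT-2": "gpt2",
    "GPT-Neo": "EleutherAI/gpt-neo-1.3B",  # assumed intended id for "gpt-neo-1.3"
}

def generate_text(instruction, model_key):
    # Load the selected model on demand and generate a short completion
    if model_key not in models:
        return "Please select a model for this strategy."
    pipe = pipeline("text-generation", model=models[model_key])
    return pipe(instruction, max_length=100, do_sample=True)[0]["generated_text"]

with gr.Blocks(title="Text Generation with Dynamic Model Selection") as demo:
    instruction = gr.Textbox(placeholder="Enter instruction here...", label="Instruction")
    strategy1 = gr.Dropdown(list(models.keys()), label="Strategy 1 - QA-Based Prompting")
    output = gr.Textbox(label="Generated text")
    submit = gr.Button("Submit")
    # The dropdown value reaches the handler as an ordinary input,
    # so there is no need to reassign iface.fn at runtime.
    submit.click(fn=generate_text, inputs=[instruction, strategy1], outputs=output)

demo.launch()

The remaining strategy dropdowns can be added the same way, each wired to its own button (or to a shared handler) as a normal input, which avoids mutating the interface after construction.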
requirements.txt CHANGED
@@ -3,4 +3,5 @@ torch
  openai
  sentencepiece
  fschat
- accelerate

  openai
  sentencepiece
  fschat
+ accelerate
+ gpt2
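Review note (not part of the committed diff): the new app.py loads GPT-2 through transformers at runtime via pipeline("text-generation", model="gpt2"), so the packages it actually imports are transformers and gradio; assuming those are not already listed in the unchanged first lines of requirements.txt, they would be the entries to add here. It is worth confirming that a distribution named gpt2 exists on PyPI before relying on that line.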