malvika2003 committed
Commit 90b7b96
1 Parent(s): 4852ca8

Update qachatbot.py

Files changed (1)
  1. qachatbot.py +96 -22
qachatbot.py CHANGED
@@ -1,30 +1,104 @@
 import gradio as gr
-from transformers import AutoTokenizer, TextIteratorStreamer
+from transformers import pipeline, AutoTokenizer
 
-# Define the models and their configurations
-model_name = "phi-2"  # Replace with the actual model name
-model_configuration = {
-    "toeknizer_kwargs": {'model_id': 'susnato/phi-2', 'prompt_template': 'Instruct:{instruction}\nOutput:'}  # Replace with the actual tokenizer configuration
-}
+# Define examples and model configuration
+examples = [
+    "Give me a recipe for pizza with pineapple",
+    "Write me a tweet about the new OpenVINO release",
+    "Explain the difference between CPU and GPU",
+    "Give five ideas for a great weekend with family",
+    "Do Androids dream of Electric sheep?",
+    "Who is Dolly?",
+    "Please give me advice on how to write resume?",
+    "Name 3 advantages to being a cat",
+    "Write instructions on how to become a good AI engineer",
+    "Write a love letter to my best friend",
+]
 
-# Load the tokenizer
+# Define the model and its tokenizer
+model_name = "susnato/phi-2"  # Replace with your actual model identifier
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-tokenizer_kwargs = model_configuration.get("toeknizer_kwargs", {})
+generator = pipeline("text-generation", model=model_name, tokenizer=tokenizer)
+
+def run_generation(user_text, top_p, temperature, top_k, max_new_tokens, performance):
+    prompt = f"Instruct:{user_text}\nOutput:"
+    response = generator(prompt, max_length=max_new_tokens, top_p=top_p, temperature=temperature, top_k=top_k)[0]["generated_text"]
+    return response, "N/A"  # Replace "N/A" with actual performance metrics if available
+
+def reset_textbox(*args):
+    return "", "", ""
 
-# Define the Gradio interface
 def main():
-    with gr.Row():
-        with gr.Column(scale=4):
-            user_text = gr.Textbox(
-                placeholder="Write an email about an alpaca that likes flan",
-                label="User instruction",
-            )
-            model_output = gr.Textbox(label="Model response", interactive=False)
-            performance = gr.Textbox(label="Performance", lines=1, interactive=False)
+    with gr.Blocks() as demo:
+        gr.Markdown(
+            "# Question Answering with OpenVINO\n"
+            "Provide instruction which describes a task below or select among predefined examples and model writes response that performs requested task."
+        )
+
+        with gr.Row():
+            with gr.Column(scale=4):
+                user_text = gr.Textbox(
+                    placeholder="Write an email about an alpaca that likes flan",
+                    label="User instruction",
+                )
+                model_output = gr.Textbox(label="Model response", interactive=False)
+                performance = gr.Textbox(label="Performance", lines=1, interactive=False)
+            with gr.Column(scale=1):
+                button_clear = gr.Button(value="Clear")
+                button_submit = gr.Button(value="Submit")
+        gr.Examples(examples, user_text)
        with gr.Column(scale=1):
-            button_clear = gr.Button(value="Clear")
-            button_submit = gr.Button(value="Submit")
+            max_new_tokens = gr.Slider(
+                minimum=1,
+                maximum=1000,
+                value=256,
+                step=1,
+                interactive=True,
+                label="Max New Tokens",
+            )
+            top_p = gr.Slider(
+                minimum=0.05,
+                maximum=1.0,
+                value=0.92,
+                step=0.05,
+                interactive=True,
+                label="Top-p (nucleus sampling)",
+            )
+            top_k = gr.Slider(
+                minimum=0,
+                maximum=50,
+                value=0,
+                step=1,
+                interactive=True,
+                label="Top-k",
+            )
+            temperature = gr.Slider(
+                minimum=0.1,
+                maximum=5.0,
+                value=0.8,
+                step=0.1,
+                interactive=True,
+                label="Temperature",
+            )
+
+        user_text.submit(
+            run_generation,
+            [user_text, top_p, temperature, top_k, max_new_tokens, performance],
+            [model_output, performance],
+        )
+        button_submit.click(
+            run_generation,
+            [user_text, top_p, temperature, top_k, max_new_tokens, performance],
+            [model_output, performance],
+        )
+        button_clear.click(
+            reset_textbox,
+            [user_text, model_output, performance],
+            [user_text, model_output, performance],
+        )
+
+    return demo
 
-# Run the Gradio interface
-iface = gr.Interface(fn=main, inputs=user_text, outputs=model_output, performance=performance, live=True)
-iface.launch()
+if __name__ == "__main__":
+    iface = main()
+    iface.launch(share=True)
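
A note on the new run_generation: the transformers text-generation pipeline ignores top_p, top_k, and temperature unless sampling is enabled, and max_length counts prompt tokens as well, so the "Max New Tokens" slider does not map directly onto generated tokens, and the Performance box always shows "N/A". The sketch below is not part of this commit; it is one possible follow-up that assumes do_sample=True and max_new_tokens are appropriate for this model, and uses a simple wall-clock measurement (my addition) to fill the Performance box. It reuses the module-level generator and tokenizer defined in the committed file.

import time

def run_generation(user_text, top_p, temperature, top_k, max_new_tokens, performance):
    # Hypothetical variant of the committed function, for illustration only.
    prompt = f"Instruct:{user_text}\nOutput:"
    start = time.perf_counter()
    response = generator(
        prompt,
        max_new_tokens=max_new_tokens,  # cap only the generated tokens, not prompt + output
        do_sample=True,                 # without this, top_p / top_k / temperature are ignored
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
    )[0]["generated_text"]
    elapsed = time.perf_counter() - start
    # Rough count of newly generated tokens (generated_text includes the prompt by default).
    new_tokens = len(tokenizer(response)["input_ids"]) - len(tokenizer(prompt)["input_ids"])
    return response, f"{new_tokens} tokens in {elapsed:.2f} s ({new_tokens / elapsed:.1f} tokens/s)"

The unused performance parameter is kept only so the signature still matches the Gradio event wiring in the commit, which passes the Performance textbox as an input component.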