mohamedemam commited on
Commit
5e20e58
1 Parent(s): 9fe3114

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -0
app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Checkpoint hosted on the Hugging Face Hub; loaded once at import time so
# every Gradio request reuses the same tokenizer/model instances.
model_name = "mohamedemam/QA_GeneraTor"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Function to generate a question/answer pair with configurable sampling.
def generate_qa(context, temperature, top_p):
    """Generate a question and answer for *context* with the seq2seq model.

    Args:
        context: Source passage to generate a question/answer pair from.
        temperature: Sampling temperature forwarded to ``model.generate``.
        top_p: Nucleus-sampling probability mass forwarded to ``model.generate``.

    Returns:
        The decoded model output as a plain string.
    """
    # NOTE(review): "Quation answer:" (sic) is presumably the exact prompt
    # prefix the checkpoint was fine-tuned with, so it is kept verbatim.
    input_text = f"Quation answer: {context}"

    # Bug fix: the original referenced the undefined name `ttokenizer`
    # (NameError on every call) and passed the whole BatchEncoding dict
    # positionally to generate(). Tokenize with the real tokenizer and hand
    # generate() the input-ids tensor plus its attention mask.
    encoded = tokenizer(
        input_text,
        max_length=400,
        truncation=True,
        padding="max_length",
        return_tensors="pt",
    )

    # do_sample=True is required for temperature/top_p to have any effect;
    # without it generation is greedy and the UI sliders are silently ignored.
    output = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_length=150,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
# Create the Gradio interface with sliders for temperature and top-p.
# Fix: the gr.inputs.* namespace (and its `default=` keyword) was deprecated
# and removed in Gradio 3/4 — use the top-level gr.Slider component with
# `value=` so the app runs on current Gradio releases.
iface = gr.Interface(
    fn=generate_qa,
    inputs=[
        "text",
        gr.Slider(minimum=0.2, maximum=2, value=1, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1, value=0.8, step=0.1, label="Top-p"),
    ],
    outputs="text",
    title="Question Generation and Answering",
    description="Enter a context, adjust temperature and top-p, and the model will generate a question and answer.",
)

# Launch the interface (starts the local web server / HF Space entry point).
iface.launch()