mohamedemam committed
Commit ddaf33f
1 Parent(s): 7c099a0

Create app.py

Files changed (1)
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ import gradio as gr
+ import wikipediaapi
+ from peft import PeftModel, PeftConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Load the base model, apply the fine-tuned PEFT adapter, and load the tokenizer
+ model_name = "bigscience/bloomz-3b"
+ config = PeftConfig.from_pretrained("mohamedemam/Arabic-meeting-summarization")
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ model = PeftModel.from_pretrained(model, "mohamedemam/Arabic-meeting-summarization")
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model.eval()
+
+ # Fetch example contexts from Wikipedia, one non-empty paragraph per dropdown entry
+ wiki_wiki = wikipediaapi.Wikipedia('MyProjectName (merlin@example.com)', 'en')
+ page_py = wiki_wiki.page('Lionel Messi')
+ example_contexts = [p for p in page_py.text.split("\n") if p.strip()]
+
+ # Generate text from a context with configurable sampling and beam-search parameters
+ def generate_qa(context, temperature, top_p, num_seq, l_p, num_b):
+     input_ids = tokenizer(context, return_tensors='pt')
+     output = model.generate(
+         **input_ids,
+         temperature=temperature,
+         top_p=top_p,
+         num_return_sequences=num_seq,
+         max_length=100,
+         num_beams=num_b,
+         length_penalty=l_p,
+         do_sample=True,
+     )
+     generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)
+     # Deduplicate the returned sequences before displaying them
+     return "\n\n".join(set(generated_text))
+
+ iface = gr.Interface(
+     fn=generate_qa,
+     inputs=[
+         gr.Dropdown(example_contexts, label="Choose an Example"),
+         gr.Slider(minimum=0.0, maximum=5, value=2.1, step=0.01, label="Temperature"),
+         gr.Slider(minimum=0.0, maximum=1, value=0.5, step=0.01, label="Top-p"),
+         gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of sequences"),
+         gr.Slider(minimum=0.01, maximum=5, value=3, step=0.01, label="Length penalty"),
+         gr.Slider(minimum=1, maximum=20, value=3, step=1, label="Number of beams"),
+     ],
+     outputs=gr.Textbox(label="Generated Output"),
+     title="Question Generation and Answering",
+     description="Select an example context and adjust the generation parameters; the model will generate questions and answers.",
+ )
+
+ # Launch the interface
+ iface.launch()
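
For reference, a minimal way to exercise generate_qa outside the Gradio UI, once the script's globals are loaded; the parameter values below are illustrative, not tuned:

# Generate output for the first example paragraph directly (illustrative settings)
print(generate_qa(example_contexts[0], temperature=0.9, top_p=0.95, num_seq=1, l_p=1.0, num_b=4))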