itsmariamaraki committed on
Commit
37d2675
•
1 Parent(s): d91f8b4

Create app.py

Files changed (1)
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
+ # https://huggingface.co/spaces/itsmariamaraki/AAI-Assessment3
+
+ # Here are the imports
+
+ import gradio as gr
+ from PyPDF2 import PdfReader
+ from transformers import pipeline
+ import soundfile as sf
+ from io import BytesIO
+
+ # Here is the code
+
+ def abstract(pdf_file):
+     pdf_bytes = BytesIO(pdf_file)
+     pdf_reader = PdfReader(pdf_bytes)
+
+     abstract_text = ''
+
+     for page in pdf_reader.pages:
+         text = page.extract_text()
+
+         if 'abstract' in text.lower():  # read only the abstract: start at the 'Abstract' heading and stop at the 'Introduction' heading
+             start_index = text.lower().find('abstract')
+             end_index = text.lower().find('introduction')
+             if end_index == -1:  # no 'Introduction' heading on this page, so keep the rest of the page
+                 end_index = len(text)
+             abstract_text = text[start_index:end_index]
+             break
+
+     return abstract_text
+
+
+ summarization = pipeline('summarization', model='pszemraj/long-t5-tglobal-base-16384-book-summary')  # the best summarization model I tested for this assessment
+ audiospeech = pipeline('text-to-speech', model='suno/bark-small')  # the voice is a bit distorted, but it gives a good output and takes less time
+
+
+ def summarization_n_audiospeech(pdf_file):
+     abstract_text = abstract(pdf_file)
+
+     summary = summarization(abstract_text, max_length=50, min_length=10)[0]['summary_text']  # min/max lengths tuned by trial and error to get roughly one sentence; a different article may need different values
+
+     # convert the summary into an audio output
+     tts_output = audiospeech(summary)
+     audio_data = tts_output['audio'][0]
+     sampling_rate = tts_output['sampling_rate']  # use the model's own sampling rate rather than a hard-coded 16 kHz
+
+     with BytesIO() as buffer:
+         sf.write(buffer, audio_data, sampling_rate, format='wav')
+         audio_bytes = buffer.getvalue()
+
+     return summary, audio_bytes
+
+
+ iface = gr.Interface(
+     fn=summarization_n_audiospeech,
+     inputs=gr.File(label='Upload PDF', type='binary'),  # type='binary' passes the file in as raw bytes; without a type, Gradio raised an error
+     outputs=[
+         gr.Textbox(label='Summarization of the Abstract:'),
+         gr.Audio(label="Audio Speech of the Abstract's Summary:")
+     ],
+     live=True
+ )
+
+ iface.launch()