File size: 3,114 Bytes
37d2675
 
 
 
 
 
 
 
 
 
 
 
 
 
97e5aa2
37d2675
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
df81f7f
37d2675
 
 
 
 
 
 
ca2c61a
b9cad30
1769f1b
37d2675
 
e27a3a3
37d2675
 
 
 
 
 
e27a3a3
37d2675
 
 
 
 
c29fadb
37d2675
 
 
 
4a15dfd
 
7d978ec
 
 
 
37d2675
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# https://huggingface.co/spaces/itsmariamaraki/AAI-Assessment3

# Here are the imports

import gradio as gr
import PyPDF2
from PyPDF2 import PdfReader
from pdfminer.high_level import extract_pages, extract_text
from transformers import pipeline, AutoProcessor, AutoModel, AutoTokenizer
import torch
import soundfile as sf
from IPython.display import Audio
from datasets import load_dataset
from io import BytesIO
import os

# Here is the code

def abstract(pdf_file):
    """Extract the abstract section from a PDF supplied as raw bytes.

    Scans pages in order and returns the text between the first
    (case-insensitive) occurrence of 'abstract' and the following
    'introduction' heading on the same page. Returns '' if no page
    mentions an abstract.
    """
    pdf_reader = PyPDF2.PdfReader(BytesIO(pdf_file))

    for page in pdf_reader.pages:
        text = page.extract_text()
        lowered = text.lower()

        if 'abstract' in lowered:
            start_index = lowered.find('abstract')
            # Search for 'introduction' *after* the abstract heading so a
            # stray earlier mention cannot produce an empty/backwards slice.
            end_index = lowered.find('introduction', start_index)
            # Bug fix: if 'introduction' is absent on this page, find()
            # returns -1 and the original slice text[start:-1] silently
            # dropped the last character (or yielded garbage). Fall back
            # to the end of the page instead.
            if end_index == -1:
                end_index = len(text)
            return text[start_index:end_index]

    return ''



# Module-level model setup: both Hugging Face pipelines are instantiated once
# at import time, so each request pays only inference cost, not model loading.
summarization = pipeline('summarization', model = 'pszemraj/long-t5-tglobal-base-16384-book-summary') #best summarization model i tested regarding this assessment
audiospeech = pipeline('text-to-speech', model = 'suno/bark-small') #the voice is a bit distorted but gives a good output & takes less time



def summarization_n_audiospeech(pdf_file):
    """Summarize a PDF's abstract to one sentence and synthesize it as speech.

    Parameters:
        pdf_file: raw PDF bytes (as delivered by gr.File(type='binary')).

    Returns:
        (fin_summary, audio_bytes): the one-sentence summary string, and the
        synthesized speech as WAV-encoded bytes for the gr.Audio output.
    """
    abstract_text = abstract(pdf_file)

    # min/max lengths were tuned by hand against the assessment articles; a
    # different article may need different bounds for a single clean sentence.
    summary = summarization(abstract_text, max_length=50, min_length=10)[0]['summary_text']

    # Keep only the first sentence of the summary.
    fin_summary = summary.split('.')[0] + '.'

    # Convert the one-line summary into speech.
    tts_output = audiospeech(fin_summary)
    audio_data = tts_output['audio'][0]

    # Bug fix: the sample rate was hard-coded to 16000, but the TTS pipeline
    # reports the rate it actually generated at (bark-small emits 24 kHz);
    # writing the WAV at the wrong rate slows playback and lowers the pitch.
    # Fall back to the old 16000 only if the pipeline omits the key.
    sampling_rate = tts_output.get('sampling_rate', 16000)

    with BytesIO() as buffer:
        sf.write(buffer, audio_data, sampling_rate, format = 'wav')
        audio_bytes = buffer.getvalue()

    return fin_summary, audio_bytes



# Gradio UI wiring: one PDF file in, a text summary and its spoken version out.
_examples_dir = os.path.dirname(__file__)
_example_pdfs = [
    'Hidden_Technical_Debt.pdf',
    'Semiconductors.pdf',
    'Efficient_Estimation_of_Word_Representations.pdf',
]

iface = gr.Interface(
    fn = summarization_n_audiospeech,
    # type='binary' hands the raw bytes straight to the handler; without an
    # explicit type, gradio raised an error for this input.
    inputs = gr.File(label='upload PDF', type='binary'),
    outputs = [
        gr.Textbox(label='Summarization of the Abstract:'),
        gr.Audio(label="Audio Speech of the Abstract's Summary:"),
    ],
    title = "PDF's Abstract Summarization & Audio Speech Processor",
    description = "App that generates a one-line summary of the abstract & a speech audio of this summarization -- requirements: app only accepts PDFs which include an ABSTRACT section",
    examples = [os.path.join(_examples_dir, name) for name in _example_pdfs],
)

iface.launch()