Ari committed on
Commit
7f2b3e5
1 Parent(s): 11422b8

Update app.py

Files changed (1)
  1. app.py +42 -51
app.py CHANGED
@@ -1,65 +1,56 @@
- #**************** IMPORT PACKAGES ********************
  import gradio as gr
- import numpy as np
- import pytesseract as pt
- import pdf2image
  import os
-
- import tempfile
- from fpdf import FPDF
- import re
  import nltk
- from nltk.tokenize import sent_tokenize
- from nltk.tokenize import word_tokenize
- import pdfkit
- import yake
- from zipfile import ZipFile
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ from fpdf import FPDF
  from gtts import gTTS
- from transformers import AutoTokenizer, AutoModelForPreTraining, AutoModel, AutoConfig
- from summarizer import Summarizer, TransformerSummarizer
- from transformers import pipelines
  from pdfminer.high_level import extract_text

- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
  nltk.download('punkt')

- model_name = 'nlpaueb/legal-bert-base-uncased'
+ # Load the models and tokenizers once, not every time the function is called
  tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
-
  model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")

- def pdf_to_text(text, PDF):
-     Min = int(20)
-     if text == "":
-         # The setup of huggingface.co
-         file_obj = PDF
-         text = extract_text(file_obj.name)
-         inputs = tokenizer([text], max_length=1024, return_tensors="pt")
-
-         Min = int(Min)
-         # Generate Summary
-         summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=Min, max_length=Min+1000)
-         output_text = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-
-     else:
-         inputs = tokenizer([text], max_length=1024, return_tensors="pt")
-         # Generate Summary
-         summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=Min, max_length=Min+1000)
-         output_text = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-
-     pdf = FPDF()
-     pdf.add_page()
-     pdf.set_font("Times", size = 12)
-     f = output_text
-     pdf.multi_cell(190, 10, txt = f, align = 'C')
-     pdf.output("legal.pdf")
-
-     myobj = gTTS(text=output_text, lang='en', slow=False)
-     myobj.save("legal.wav")
-
-     return "legal.wav", output_text, "legal.pdf"
+ # Main processing function
+ def pdf_to_text(text, PDF, min_length=20):
+     try:
+         # Extract text from PDF if no input text provided
+         if text == "":
+             text = extract_text(PDF.name)
+
+         # Tokenize text
+         inputs = tokenizer([text], max_length=1024, return_tensors="pt")
+         min_length = int(min_length)
+
+         # Generate summary
+         summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=min_length, max_length=min_length+1000)
+         output_text = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
+
+         # Save summarized text to PDF
+         pdf = FPDF()
+         pdf.add_page()
+         pdf.set_font("Times", size=12)
+         pdf.multi_cell(190, 10, txt=output_text, align='C')
+         pdf_output_path = "legal.pdf"
+         pdf.output(pdf_output_path)
+
+         # Convert summarized text to audio
+         audio_output_path = "legal.wav"
+         tts = gTTS(text=output_text, lang='en', slow=False)
+         tts.save(audio_output_path)
+
+         return audio_output_path, output_text, pdf_output_path

- iface = gr.Interface(fn=pdf_to_text, inputs=["text", "file"], outputs=["audio", "text", "file"])
+     except Exception as e:
+         return None, f"An error occurred: {str(e)}", None
+
+ # Gradio interface
+ iface = gr.Interface(
+     fn=pdf_to_text,
+     inputs=["text", gr.inputs.File(label="Upload PDF"), gr.inputs.Slider(minimum=10, maximum=100, step=10, default=20, label="Summary Minimum Length")],
+     outputs=["audio", "text", "file"]
+ )

  if __name__ == "__main__":
-     iface.launch() # Removed 'share=True'
+     iface.launch()
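
The refactored pdf_to_text above can also be exercised outside the Gradio UI. Below is a minimal sketch of the text-only path, not part of the commit: it assumes the new app.py is importable as a module named app, that the installed Gradio version still provides gr.inputs (which the committed interface uses), and that network access is available for the model download and gTTS. The sample string and minimum length are illustrative only.

# Minimal sketch (not part of the commit): call the text-only path of
# pdf_to_text directly. Assumes app.py above is importable as `app`;
# the sample text below is illustrative.
from app import pdf_to_text

sample = ("This agreement is entered into by the parties to set out the "
          "terms of service, the payment schedule, and the conditions "
          "under which either party may terminate.")

audio_path, summary, pdf_path = pdf_to_text(sample, None, min_length=20)
print(summary)      # summarized text, or an error message if the call failed
print(audio_path)   # "legal.wav" on success, None on failure
print(pdf_path)     # "legal.pdf" on success, None on failure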