from pathlib import Path

import gradio as gr
import pdfplumber
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the summarization model once at startup instead of on every request.
MODEL_NAME = "Einmalumdiewelt/T5-Base_GNAD"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, return_dict=True)


def respond(pdf_file, upper_page=0):
    """Extract text from the uploaded PDF and return a summary plus the raw text."""
    pdf_path = Path(pdf_file.name)
    # The "Page Limit" textbox arrives as a string; 0 or empty means "all pages".
    upper_page = int(upper_page) if upper_page else 0

    all_text = ""
    with pdfplumber.open(pdf_path) as pdf:
        for idx, pdf_page in enumerate(pdf.pages):
            if upper_page and idx >= upper_page:
                break
            # extract_text() returns None for pages without extractable text.
            single_page_text = pdf_page.extract_text() or ""
            all_text = all_text + "\n" + single_page_text

    inputs = tokenizer.encode(
        "summarize: " + all_text, return_tensors="pt", max_length=512, truncation=True
    )
    output = model.generate(inputs, min_length=70, max_length=80)
    summary = tokenizer.decode(output[0], skip_special_tokens=True)
    return summary, all_text


with gr.Blocks() as demo:
    title = """
    <h1>Talk with your document</h1>
    """
    gr.HTML(title)
    with gr.Row():
        with gr.Column():
            file_input = gr.File(label="PDF File", type="file")
            page_input = gr.Textbox(label="Page Limit")
            summarize_button = gr.Button("Summarize")
        with gr.Column():
            summary_output = gr.Textbox(label="Summarized Text")
        with gr.Column():
            text_output = gr.Textbox(label="Extracted Text")
    summarize_button.click(
        respond,
        inputs=[file_input, page_input],
        outputs=[summary_output, text_output],
    )

demo.launch(debug=True)