import spaces
import gradio as gr

from gemini.gemini_extractor import GeminiExtractorConfig, GeminiExtractor
from oai.oai_extractor import OAIExtractorConfig, OAIExtractor
from indexify_extractor_sdk import Content

# Instantiate both extractors once at startup so each tab reuses the same instance.
gemini_extractor = GeminiExtractor()
oai_extractor = OAIExtractor()


def use_gemini(pdf_filepath, key):
    # Validate the upload, wrap the PDF bytes in an Indexify Content object,
    # and run the Gemini extractor with the user-supplied API key.
    if pdf_filepath is None:
        raise gr.Error("Please provide an input PDF: upload a PDF file")
    with open(pdf_filepath, "rb") as f:
        pdf_data = f.read()
    content = Content(content_type="application/pdf", data=pdf_data)
    config = GeminiExtractorConfig(
        prompt="Extract all text from the document.",
        model_name="gemini-1.5-flash",
        key=key,
    )
    result = gemini_extractor.extract(content, config)
    return result


with gr.Blocks(title="PDF data extraction with Gemini & Indexify") as gemini_demo:
    gr.HTML("PDF data extraction with Gemini & Indexify")
    gr.HTML(
        "Indexify is a scalable, real-time, continuous indexing and structured "
        "extraction engine for unstructured data, for building generative AI applications."
    )
    gr.HTML("If you like this demo, please ⭐ star us on GitHub!")
    gr.HTML(
        "Here's an example notebook that demonstrates how to build a continuous "
        "extraction pipeline with Indexify."
    )
    with gr.Row():
        with gr.Column():
            gr.HTML(
                "Step 1: Upload a PDF file from local storage. "
                "Use this demo for a single PDF file only. "
                "You can extract from PDF files continuously and try various other "
                "extractors locally with Indexify."
            )
            pdf_file_1 = gr.File(type="filepath")
            gr.HTML("Step 2: Enter your API key.")
            key_1 = gr.Textbox(info="Please enter your GEMINI_API_KEY", label="Key:")
        with gr.Column():
            gr.HTML("Step 3: Run the extractor.")
            go_button_1 = gr.Button(value="Run Gemini extractor", variant="primary")
            model_output_text_box_1 = gr.Textbox(
                label="Extractor Output", elem_id="model_output_text_box_1"
            )
    with gr.Row():
        gr.HTML("Developed with 🫶 by Indexify | a Tensorlake product")

    go_button_1.click(
        fn=use_gemini,
        inputs=[pdf_file_1, key_1],
        outputs=[model_output_text_box_1],
    )


def use_openai(pdf_filepath, key):
    # Same flow as use_gemini, but routed through the OpenAI extractor.
    if pdf_filepath is None:
        raise gr.Error("Please provide an input PDF: upload a PDF file")
    with open(pdf_filepath, "rb") as f:
        pdf_data = f.read()
    content = Content(content_type="application/pdf", data=pdf_data)
    config = OAIExtractorConfig(
        prompt="Extract all text from the document.",
        model_name="gpt-4o",
        key=key,
    )
    result = oai_extractor.extract(content, config)
    return result


with gr.Blocks(title="PDF data extraction with OpenAI & Indexify") as openai_demo:
    gr.HTML("PDF data extraction with OpenAI & Indexify")
    gr.HTML(
        "Indexify is a scalable, real-time, continuous indexing and structured "
        "extraction engine for unstructured data, for building generative AI applications."
    )
    gr.HTML("If you like this demo, please ⭐ star us on GitHub!")
    gr.HTML(
        "Here's an example notebook that demonstrates how to build a continuous "
        "extraction pipeline with Indexify."
    )
    with gr.Row():
        with gr.Column():
            gr.HTML(
                "Step 1: Upload a PDF file from local storage. "
                "Use this demo for a single PDF file only. "
                "You can extract from PDF files continuously and try various other "
                "extractors locally with Indexify."
            )
            pdf_file_2 = gr.File(type="filepath")
            gr.HTML("Step 2: Enter your API key.")
            key_2 = gr.Textbox(info="Please enter your OPENAI_API_KEY", label="Key:")
        with gr.Column():
            gr.HTML("Step 3: Run the extractor.")
            go_button_2 = gr.Button(value="Run OpenAI extractor", variant="primary")
            model_output_text_box_2 = gr.Textbox(
                label="Extractor Output", elem_id="model_output_text_box_2"
            )
    with gr.Row():
        gr.HTML("Developed with 🫶 by Indexify | a Tensorlake product")

    go_button_2.click(
        fn=use_openai,
        inputs=[pdf_file_2, key_2],
        outputs=[model_output_text_box_2],
    )


# Expose both demos as tabs in a single app.
demo = gr.TabbedInterface(
    [gemini_demo, openai_demo],
    ["Gemini Extractor", "OpenAI Extractor"],
    theme=gr.themes.Soft(),
)
demo.queue()
demo.launch()