rexsimiloluwah committed on
Commit
bbfb60a
1 Parent(s): 4ddd43e

added more applications

app.py CHANGED
@@ -6,7 +6,6 @@ from apps.asr import (
 )
 from apps.object_detection import obj_detection_interface
 from apps.image_captioning import img_captioning_interface
-from apps.multimodal_visual_qa import multimodal_visual_qa_interface
 from apps.ner import ner_interface
 
 app = gr.Blocks()
@@ -18,7 +17,6 @@ with app:
         file_transcribe_interface,
         obj_detection_interface,
         img_captioning_interface,
-        multimodal_visual_qa_interface,
         ner_interface
     ],
     [
@@ -26,7 +24,6 @@ with app:
         "Transcribe from Audio File",
         "Detect Objects from an Image",
         "Generate a Caption for an Image",
-        "Perform QA on an Image",
         "Named Entity Recogntion"
     ]
 )
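These hunks only remove the VQA entries; the call that consumes the two bracketed lists sits outside the visible context, and the newly added summarizer app (apps/summarizer.py below) is not registered here yet. The shape of the lists matches Gradio's gr.TabbedInterface, which pairs a list of interfaces with a matching list of tab titles, so wiring another app in would presumably follow the sketch below. The TabbedInterface wrapper and the tab titles are assumptions for illustration, not code from this repository.

import gradio as gr
from apps.ner import ner_interface
from apps.summarizer import summarizer_interface  # module added in this commit

# Assumed wiring: gr.TabbedInterface pairs each Interface with a tab title.
app = gr.TabbedInterface(
    [ner_interface, summarizer_interface],
    ["Named Entity Recognition", "Summarize Text"],  # hypothetical titles
)

if __name__ == "__main__":
    app.launch()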
apps/__pycache__/image_captioning.cpython-311.pyc ADDED
Binary file (1.36 kB).
 
apps/__pycache__/multimodal_visual_qa.cpython-311.pyc ADDED
Binary file (1.49 kB).
 
apps/__pycache__/ner.cpython-311.pyc ADDED
Binary file (842 Bytes).
 
apps/__pycache__/object_detection.cpython-311.pyc ADDED
Binary file (4.32 kB).
 
apps/multimodal_visual_qa.py CHANGED
@@ -1,24 +1,24 @@
-import gradio as gr
-from transformers import AutoProcessor
-from transformers import BlipForQuestionAnswering
+# import gradio as gr
+# from transformers import AutoProcessor
+# from transformers import BlipForQuestionAnswering
 
-model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
-processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
+# model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
+# processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base")
 
-def process_image(image, question: str):
-    inputs = processor(image, question, return_tensors="pt")
-    output = model.generate(**inputs)
-    answer = processor.decode(output[0], skip_special_tokens=True)
+# def process_image(image, question: str):
+#     inputs = processor(image, question, return_tensors="pt")
+#     output = model.generate(**inputs)
+#     answer = processor.decode(output[0], skip_special_tokens=True)
 
-    return answer
+#     return answer
 
-multimodal_visual_qa_interface = gr.Interface(
-    fn=process_image,
-    inputs=[
-        gr.Image(label="Input Image", type="pil"),
-        gr.Textbox(label="Enter question to prompt the image")
-    ],
-    outputs=gr.Textbox(label="Answer"),
-    title="Multimodal Visual QA Application",
-    description="This app can help you ask questions about an image"
-)
+# multimodal_visual_qa_interface = gr.Interface(
+#     fn=process_image,
+#     inputs=[
+#         gr.Image(label="Input Image", type="pil"),
+#         gr.Textbox(label="Enter question to prompt the image")
+#     ],
+#     outputs=gr.Textbox(label="Answer"),
+#     title="Multimodal Visual QA Application",
+#     description="This app can help you ask questions about an image"
+# )
apps/summarizer.py ADDED
@@ -0,0 +1,19 @@
+import gradio as gr
+from transformers import pipeline
+
+summarizer = pipeline("summarization", model="Falconsai/text_summarization")
+
+def summarize(text: str, max_length: int, min_length: int, do_sample: bool=False):
+    result = summarizer(text, max_length, min_length, do_sample)
+    return result["summary_text"]
+
+summarizer_interface = gr.Interface(
+    summarize,
+    inputs=[
+        gr.Textbox(label="Enter the text to be summarized"),
+        gr.Slider(minimum=0, maximum=3000, step=100, label="Max Length", value=1000),
+        gr.Slider(minimum=0, maximum=100, step=10, label="Min Length", value=30),
+        gr.Checkbox(label="Do Sample", value=False)
+    ],
+    outputs=gr.Textbox(label="Summarized Text")
+)
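One caveat on the new summarize helper: Hugging Face's summarization pipeline expects generation settings as keyword arguments and returns a list of dicts, whereas the code above passes them positionally (where they are not interpreted as max_length/min_length/do_sample) and indexes the result as a single dict. A minimal corrected sketch, keeping the commit's checkpoint and treating the rest as illustration:

from transformers import pipeline

# Same checkpoint as the commit uses.
summarizer = pipeline("summarization", model="Falconsai/text_summarization")

def summarize(text: str, max_length: int, min_length: int, do_sample: bool = False) -> str:
    # Generation controls must be passed as keyword arguments.
    outputs = summarizer(
        text, max_length=max_length, min_length=min_length, do_sample=do_sample
    )
    # The pipeline returns a list with one dict per input text.
    return outputs[0]["summary_text"]

The gr.Interface wiring from the commit can wrap this function unchanged.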