Create app.py
app.py
ADDED
import gradio as gr

from transformers import pipeline
from transformers.utils import logging

from helper import (
    ignore_warnings,
    render_results_in_image,
    summarize_predictions_natural_language,
)

# Silence non-error transformers logging and warnings.
logging.set_verbosity_error()
ignore_warnings()

# Load both pipelines once at startup: DETR for object detection,
# VITS for text-to-speech narration of the results.
od_pipe = pipeline("object-detection", model="facebook/detr-resnet-50")
tts_pipe = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")


def detect_objects(pil_image):
    # Detect objects, draw the predicted boxes, and summarize them as text.
    pipeline_output = od_pipe(pil_image)
    processed_image = render_results_in_image(pil_image, pipeline_output)
    text = summarize_predictions_natural_language(pipeline_output)

    # Narrate the summary; the TTS pipeline returns a dict with the
    # waveform (shape (1, n_samples)) and its sampling rate.
    narrated_text = tts_pipe(text)
    sr = narrated_text["sampling_rate"]
    audio = narrated_text["audio"][0]

    return processed_image, text, (sr, audio)


demo = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(label="Input image", type="pil"),
    outputs=[
        gr.Image(label="Output image with predicted instances", type="pil"),
        gr.Textbox(label="Description of detected objects", lines=3),
        gr.Audio(label="Play audio to hear about the detected objects in the image"),
    ],
)

demo.launch(debug=True)
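app.py imports ignore_warnings from a local helper module and also calls render_results_in_image and summarize_predictions_natural_language, neither of which is defined in this commit; importing all three from helper is an assumption. Below is a minimal sketch of what helper.py could contain, assuming the standard transformers object-detection output (a list of dicts with "score", "label", and a "box" of pixel coordinates). All three implementations are illustrative guesses, not the Space's actual helpers.

helper.py:

# Hypothetical sketch -- the real helper module shipped with the Space may differ.
import warnings

from PIL import ImageDraw


def ignore_warnings():
    # Assumption: suppress Python warnings globally.
    warnings.filterwarnings("ignore")


def render_results_in_image(pil_image, pipeline_output):
    # Draw each predicted box with its label and score on a copy of the image.
    image = pil_image.copy()
    draw = ImageDraw.Draw(image)
    for prediction in pipeline_output:
        box = prediction["box"]
        draw.rectangle(
            [box["xmin"], box["ymin"], box["xmax"], box["ymax"]],
            outline="red",
            width=2,
        )
        draw.text(
            (box["xmin"], box["ymin"]),
            f'{prediction["label"]}: {prediction["score"]:.2f}',
            fill="red",
        )
    return image


def summarize_predictions_natural_language(pipeline_output):
    # Count detections per label and phrase the counts as one sentence.
    counts = {}
    for prediction in pipeline_output:
        counts[prediction["label"]] = counts.get(prediction["label"], 0) + 1
    if not counts:
        return "No objects were detected in this image."
    parts = [f"{n} {label}{'s' if n > 1 else ''}" for label, n in counts.items()]
    return "In this image, there are " + ", ".join(parts) + "."

For the Space to build, a requirements.txt along these lines would also be needed (the exact contents are a guess; timm is required by the DETR object-detection pipeline, and phonemizer is commonly pulled in by the VITS tokenizer):

transformers
torch
timm
phonemizer
gradio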