rajapinja committed on
Commit f797267
1 Parent(s): c32d5cf

Create app.py

Files changed (1)
app.py +49 -0
app.py ADDED
@@ -0,0 +1,49 @@
+ import torch
+ import gradio as gr
+ from PIL import Image
+ import scipy.io.wavfile as wavfile
+
+ # Use a pipeline as a high-level helper
+ from transformers import pipeline
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # model_path = ("../Models/models--Salesforce--blip-image-captioning-large"
+ #               "/snapshots/2227ac38c9f16105cb0412e7cab4759978a8fd90")
+ #
+ # tts_model_path = ("../Models/models--kakao-enterprise--vits-ljs/snapshots"
+ #                   "/3bcb8321394f671bd948ebf0d086d694dda95464")
+
+ caption_image = pipeline("image-to-text",
+                          model="Salesforce/blip-image-captioning-large", device=device)
+
+ narrator = pipeline("text-to-speech",
+                     model="kakao-enterprise/vits-ljs")
+
+ # caption_image = pipeline("image-to-text",
+ #                          model=model_path, device=device)
+ #
+ # narrator = pipeline("text-to-speech",
+ #                     model=tts_model_path)
+
+ def generate_audio(text):
+     # Generate the narrated speech for the given text
+     narrated_text = narrator(text)
+
+     # Save the audio to a WAV file
+     wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
+                   data=narrated_text["audio"][0])
+     # Return the path to the saved audio file
+     return "output.wav"
+
+
+ def caption_my_image(pil_image):
+     # Caption the image, then narrate the caption
+     semantics = caption_image(images=pil_image)[0]['generated_text']
+     return generate_audio(semantics)
+
+ demo = gr.Interface(fn=caption_my_image,
+                     inputs=[gr.Image(label="Select Image", type="pil")],
+                     outputs=[gr.Audio(label="Image Caption")],
+                     title="@LaraidSolutions Project 8: Image Captioning",
+                     description="This application generates a spoken caption for the selected image.")
+ demo.launch()
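
For a quick check outside the Gradio UI, the two pipelines above can be smoke-tested directly. The snippet below is a minimal sketch and not part of this commit; it assumes the same hosted model IDs and a placeholder input image named example.jpg.

    import scipy.io.wavfile as wavfile
    from PIL import Image
    from transformers import pipeline

    # Sketch: load the same models used in app.py (assumed model IDs, CPU by default)
    captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
    tts = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

    # "example.jpg" is a placeholder filename, not part of this commit
    caption = captioner(images=Image.open("example.jpg"))[0]["generated_text"]
    speech = tts(caption)
    wavfile.write("test_output.wav", rate=speech["sampling_rate"], data=speech["audio"][0])
    print(caption)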