Mr-Vicky-01 committed on
Commit
518a610
•
1 Parent(s): 7330548

Update app.py

Files changed (1)
  1. app.py +103 -61
app.py CHANGED
@@ -1,65 +1,107 @@
-from PIL import Image
-from transformers import BlipProcessor, BlipForConditionalGeneration
-from langchain import HuggingFaceHub, LLMChain, PromptTemplate
-import gradio as gr
-import numpy as np
-import requests
 import os
-# Load image captioning model
-processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-
-def generate_caption_from_image(image_path):
-    # Process the image and generate caption
-    raw_image = Image.open(image_path).convert("RGB")
-    inputs = processor(raw_image, return_tensors="pt")
-    out = model.generate(**inputs)
-    caption = processor.decode(out[0], skip_special_tokens=True)
-    return caption
-
-def generate_story_from_caption(caption):
-    # Generate story based on caption
-    llm = HuggingFaceHub(huggingfacehub_api_token=os.getenv('HUGGING_FACE'),
-                         repo_id="tiiuae/falcon-7b-instruct",
-                         verbose=False,
-                         model_kwargs={"temperature": 0.2, "max_new_tokens": 4000})
-    template = """You are a story teller.
-    You get a scenario as an input text, and generate a short story out of it.
-    Context: {scenario}
-    Story:"""
-    prompt = PromptTemplate(template=template, input_variables=["scenario"])
-    # Let's create our LLM chain now
-    chain = LLMChain(prompt=prompt, llm=llm)
-    story = chain.run(caption)
-    start_index = story.find("Story:") + len("Story:")
-    # Extract the text after "Story:"
-    story = story[start_index:].strip()
-    return story
-
-def text_to_speech(text):
-    headers = {"Authorization": f"Bearer {os.getenv('HUGGING_FACE')}"}
-    payload = {"inputs": text}
     API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
-    response = requests.post(API_URL, headers=headers, json=payload)
 
-    if response.status_code == 200:
-        with open("output.mp3", "wb") as f:
-            f.write(response.content)
-    return "output.mp3"
-
-def generate_story_from_image(image_input):
-    input_image = Image.fromarray(image_input)
-    input_image.save("input_image.jpg")
-    image_path = 'input_image.jpg'
-    caption = generate_caption_from_image(image_path)
-    story = generate_story_from_caption(caption)
-    audio = text_to_speech(story)
-    return audio
-
-# Define the input and output components
-inputs = gr.Image(label="Image")
-outputs = gr.Audio(label="Story Audio")
-
-# Create the Gradio interface
-gr.Interface(fn=generate_story_from_image, inputs=inputs, outputs=outputs, title="Story Teller").launch(debug=True)
 import os
+import streamlit as st
+import requests
+from transformers import pipeline
+import openai
+from langchain import LLMChain, PromptTemplate
+from langchain import HuggingFaceHub
+
+# Suppress all warnings
+import warnings
+warnings.filterwarnings("ignore")
+
+api_token = os.getenv('HUGGING_FACE')
+
+# Image-to-text
+def img2txt(url):
+    print("Initializing captioning model...")
+    captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+
+    print("Generating text from the image...")
+    text = captioning_model(url, max_new_tokens=20)[0]["generated_text"]
+
+    print(text)
+    return text
+
+# Text-to-story
+model = "tiiuae/falcon-7b-instruct"
+llm = HuggingFaceHub(
+    huggingfacehub_api_token=api_token,
+    repo_id=model,
+    verbose=False,
+    model_kwargs={"temperature": 0.2, "max_new_tokens": 4000})
+
+def generate_story(scenario, llm):
+    template = """You are a story teller.
+    You get a scenario as an input text, and generate a short story out of it.
+    Context: {scenario}
+    Story:
+    """
+    prompt = PromptTemplate(template=template, input_variables=["scenario"])
+    # Let's create our LLM chain now
+    chain = LLMChain(prompt=prompt, llm=llm)
+    story = chain.predict(scenario=scenario)
+    start_index = story.find("Story:") + len("Story:")
+
+    # Extract the text after "Story:"
+    story = story[start_index:].strip()
+    return story
+
+# Text-to-speech
+def txt2speech(text):
+    print("Initializing text-to-speech conversion...")
     API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
+    headers = {"Authorization": f"Bearer {api_token}"}
+    payloads = {'inputs': text}
+
+    response = requests.post(API_URL, headers=headers, json=payloads)
+
+    with open('audio_story.mp3', 'wb') as file:
+        file.write(response.content)
+
+# Streamlit web app main function
+def main():
+    st.set_page_config(page_title="🎨 Image-to-Audio Story 🎧", page_icon="🖼️")
+    st.title("Turn the Image into Audio Story")
+
+    # Allow users to upload an image file
+    uploaded_file = st.file_uploader("# 📷 Upload an image...", type=["jpg", "jpeg", "png"])
+
+    # Parameters for the LLM (in the sidebar)
+    st.sidebar.markdown("# LLM Inference Configuration Parameters")
+    top_k = st.sidebar.number_input("Top-K", min_value=1, max_value=100, value=5)
+    top_p = st.sidebar.number_input("Top-P", min_value=0.0, max_value=1.0, value=0.8)
+    temperature = st.sidebar.number_input("Temperature", min_value=0.1, max_value=2.0, value=1.5)
+
+    if uploaded_file is not None:
+        # Read and save the uploaded image file
+        bytes_data = uploaded_file.read()
+        with open("uploaded_image.jpg", "wb") as file:
+            file.write(bytes_data)
+
+        st.image(uploaded_file, caption='🖼️ Uploaded Image', use_column_width=True)
+
+        # Run the pipeline: caption, story, audio
+        with st.spinner("## 🤖 AI is at Work!"):
+            scenario = img2txt("uploaded_image.jpg")  # Extract a caption from the image
+            story = generate_story(scenario, llm)     # Generate a story from the caption
+            txt2speech(story)                         # Convert the story to audio
+
+        st.markdown("---")
+        st.markdown("## 📜 Image Caption")
+        st.write(scenario)
+
+        st.markdown("---")
+        st.markdown("## 📖 Story")
+        st.write(story)
+
+        st.markdown("---")
+        st.markdown("## 🎧 Audio Story")
+        st.audio("audio_story.mp3")
+
+if __name__ == '__main__':
+    main()
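
The new `img2txt` builds the BLIP pipeline inside the function, so the captioning model is reloaded on every uploaded image. A minimal sketch of caching it with Streamlit's `st.cache_resource` (assumes a Streamlit version that provides this decorator; `get_captioning_model` is a hypothetical helper, not part of the commit):

```python
import streamlit as st
from transformers import pipeline

@st.cache_resource
def get_captioning_model():
    # Load BLIP once per process instead of once per uploaded image.
    return pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

def img2txt(url):
    # Reuse the cached pipeline for each upload.
    return get_captioning_model()(url, max_new_tokens=20)[0]["generated_text"]
```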
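The new `txt2speech` drops the `status_code` check that the old `text_to_speech` had, so a failed Inference API call (for example, while the model is still loading) would write an error payload into `audio_story.mp3`. A sketch that keeps the commit's endpoint and token but restores the check (the exception message format is an assumption):

```python
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
api_token = os.getenv('HUGGING_FACE')

def txt2speech(text):
    headers = {"Authorization": f"Bearer {api_token}"}
    response = requests.post(API_URL, headers=headers, json={"inputs": text})
    if response.status_code != 200:
        # Surface Inference API errors instead of saving them as audio.
        raise RuntimeError(f"TTS request failed ({response.status_code}): {response.text}")
    with open("audio_story.mp3", "wb") as file:
        file.write(response.content)
```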
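The sidebar collects `top_k`, `top_p`, and `temperature`, but `llm` is constructed once at import time with fixed `model_kwargs`, so those inputs never reach the model. One way to wire them through is to build the client from the sidebar values (a sketch; `build_llm` is a hypothetical helper, and constructing the client per request is an assumption, not what the commit does):

```python
import os
from langchain import HuggingFaceHub

api_token = os.getenv('HUGGING_FACE')

def build_llm(temperature, top_k, top_p):
    # Pass the sidebar values into the Falcon client so they affect generation.
    return HuggingFaceHub(
        huggingfacehub_api_token=api_token,
        repo_id="tiiuae/falcon-7b-instruct",
        model_kwargs={"temperature": temperature, "top_k": top_k,
                      "top_p": top_p, "max_new_tokens": 4000})

# In main(): story = generate_story(scenario, build_llm(temperature, top_k, top_p))
```

Note also that with the switch from Gradio's `launch()` to Streamlit, the app is started with `streamlit run app.py` rather than by executing the script directly.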