adi-123 committed
Commit 58f448b · verified · 1 Parent(s): 5a05dae

Upload 4 files

Files changed (4)
  1. Dockerfile +22 -0
  2. app.py +137 -0
  3. requirements.txt +7 -0
  4. uploaded_image.jpg +0 -0
Dockerfile ADDED
@@ -0,0 +1,22 @@
+ # Use a lightweight Python 3.9 image as the base
+ FROM python:3.9.20-slim-bullseye
+
+ # Set the working directory within the container where the application code resides
+ WORKDIR /app
+
+ # Copy the requirements.txt file that specifies the application's dependencies
+ COPY requirements.txt ./
+
+ # Install the dependencies listed in requirements.txt using pip3
+ RUN pip3 install --upgrade pip && pip3 install -r requirements.txt
+
+ # Copy all files from the current directory (.) on the host machine
+ # to the /app directory within the container
+ COPY . .
+
+ # Expose port 8501 to make the Streamlit application accessible from outside the container
+ EXPOSE 8501
+
+ # Define the command to execute when the container starts. This runs Streamlit
+ # against the application code in app.py
+ CMD ["streamlit", "run", "app.py"]
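Given this Dockerfile, a typical local build-and-run sequence is sketched below. The image tag is illustrative (any name works), and the two environment variables are the ones app.py reads at runtime; the key values are placeholders.

# Hypothetical image tag
docker build -t image-audio-story .

# Map the exposed Streamlit port and pass the API keys app.py expects
docker run -p 8501:8501 \
  -e TOGETHER_API_KEY=<your-together-key> \
  -e HUGGINGFACEHUB_API_TOKEN=<your-hf-token> \
  image-audio-story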
app.py ADDED
@@ -0,0 +1,137 @@
+ import os
+ import streamlit as st
+ import requests
+ from transformers import pipeline
+ from typing import Dict
+ from together import Together
+
+ # Image-to-text
+ def img2txt(url: str) -> str:
+     print("Initializing captioning model...")
+     captioning_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+
+     print("Generating text from the image...")
+     text = captioning_model(url, max_new_tokens=20)[0]["generated_text"]
+
+     print(text)
+     return text
+
+ # Text-to-story generation with an LLM
+ def txt2story(prompt: str, top_k: int, top_p: float, temperature: float) -> str:
+     # Load the Together API client
+     client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
+
+     # Modify the prompt based on user inputs and enforce a 250-word limit
+     story_prompt = f"Write a short story of no more than 250 words based on the following prompt: {prompt}"
+
+     # Call the LLM
+     stream = client.chat.completions.create(
+         model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         messages=[
+             {"role": "system", "content": '''As an experienced short story writer, write a meaningful story influenced by the provided prompt.
+             Ensure the story does not exceed 250 words.'''},
+             {"role": "user", "content": story_prompt}
+         ],
+         top_k=top_k,
+         top_p=top_p,
+         temperature=temperature,
+         stream=True
+     )
+
+     # Concatenate story chunks (a chunk's delta may carry no content, e.g. the final one)
+     story = ''
+     for chunk in stream:
+         story += chunk.choices[0].delta.content or ''
+
+     return story
+
+ # Text-to-speech
+ def txt2speech(text: str) -> None:
+     print("Initializing text-to-speech conversion...")
+     API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
+     headers = {"Authorization": f"Bearer {os.environ['HUGGINGFACEHUB_API_TOKEN']}"}
+     payloads = {'inputs': text}
+
+     response = requests.post(API_URL, headers=headers, json=payloads)
+
+     # Save the returned audio bytes for later playback with st.audio
+     with open('audio_story.mp3', 'wb') as file:
+         file.write(response.content)
+
+ # Get user preferences for the story
+ def get_user_preferences() -> Dict[str, str]:
+     preferences = {}
+
+     preferences['continent'] = st.selectbox("Continent", ["North America", "Europe", "Asia", "Africa", "Australia"])
+     preferences['genre'] = st.selectbox("Genre", ["Science Fiction", "Fantasy", "Mystery", "Romance"])
+     preferences['setting'] = st.selectbox("Setting", ["Future", "Medieval times", "Modern day", "Alternate reality"])
+     preferences['plot'] = st.selectbox("Plot", ["Hero's journey", "Solving a mystery", "Love story", "Survival"])
+     preferences['tone'] = st.selectbox("Tone", ["Serious", "Light-hearted", "Humorous", "Dark"])
+     preferences['theme'] = st.selectbox("Theme", ["Self-discovery", "Redemption", "Love", "Justice"])
+     preferences['conflict'] = st.selectbox("Conflict Type", ["Person vs. Society", "Internal struggle", "Person vs. Nature", "Person vs. Person"])
+     preferences['twist'] = st.selectbox("Mystery/Twist", ["Plot twist", "Hidden identity", "Unexpected ally/enemy", "Time paradox"])
+     preferences['ending'] = st.selectbox("Ending", ["Happy", "Bittersweet", "Open-ended", "Tragic"])
+
+     return preferences
+
+ # Main function
+ def main():
+     st.set_page_config(page_title="🎨 Image-to-Audio Story 🎧", page_icon="🖼️")
+     st.title("Turn the Image into an Audio Story")
+
+     # Allow users to upload an image file
+     uploaded_file = st.file_uploader("# 📷 Upload an image...", type=["jpg", "jpeg", "png"])
+
+     # Parameters for the LLM (in the sidebar)
+     st.sidebar.markdown("# LLM Inference Configuration Parameters")
+     top_k = st.sidebar.number_input("Top-K", min_value=1, max_value=100, value=5)
+     top_p = st.sidebar.number_input("Top-P", min_value=0.0, max_value=1.0, value=0.8)
+     temperature = st.sidebar.number_input("Temperature", min_value=0.1, max_value=2.0, value=1.5)
+
+     # Get user preferences for the story
+     st.markdown("## Story Preferences")
+     preferences = get_user_preferences()
+
+     if uploaded_file is not None:
+         # Read and save the uploaded image file
+         bytes_data = uploaded_file.read()
+         with open("uploaded_image.jpg", "wb") as file:
+             file.write(bytes_data)
+
+         st.image(uploaded_file, caption='🖼️ Uploaded Image', use_column_width=True)
+
+         # Initiate AI processing and story generation
+         with st.spinner("## 🤖 AI is at Work!"):
+             scenario = img2txt("uploaded_image.jpg")  # Extract a caption from the image
+
+             # Build the prompt from the image caption and the user preferences
+             prompt = f"Based on the image description: '{scenario}', create a {preferences['genre']} story set in {preferences['setting']} in {preferences['continent']}. " \
+                      f"The story should have a {preferences['tone']} tone and explore the theme of {preferences['theme']}. " \
+                      f"The main conflict should be {preferences['conflict']}. " \
+                      f"The story should have a {preferences['twist']} and end with a {preferences['ending']} ending."
+
+             story = txt2story(prompt, top_k, top_p, temperature)  # Generate a story from the caption, LLM params, and preferences
+
+             txt2speech(story)  # Convert the story to audio
+
+         st.markdown("---")
+         st.markdown("## 📜 Image Caption")
+         st.write(scenario)
+
+         st.markdown("---")
+         st.markdown("## 📖 Story")
+         st.write(story)
+
+         st.markdown("---")
+         st.markdown("## 🎧 Audio Story")
+         st.audio("audio_story.mp3")
+
+ if __name__ == '__main__':
+     main()
+
+ # Credits
+ st.markdown("### Credits")
+ st.caption('''
+ Made with ❤️ by @Aditya-Neural-Net-Ninja\n
+ Utilizes Image-to-Text, Text Generation, and Text-to-Speech Transformer Models\n
+ Gratitude to Streamlit and 🤗 Spaces for Deployment & Hosting
+ ''')
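For running the app outside Docker, a minimal sketch is below. It assumes a Python 3.9 environment with the pinned dependencies from requirements.txt (next file), and the key values are placeholders.

pip3 install -r requirements.txt

# app.py reads both keys from the environment
export TOGETHER_API_KEY=<your-together-key>
export HUGGINGFACEHUB_API_TOKEN=<your-hf-token>

streamlit run app.py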
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ tf-keras==2.17.0
+ tensorflow==2.17.0
+ transformers==4.45.1
+ huggingface_hub==0.25.1
+ pillow==10.4.0
+ streamlit==1.38.0
+ together==1.3.0
uploaded_image.jpg ADDED