from dotenv import find_dotenv, load_dotenv
from transformers import pipeline
from langchain import LLMChain, OpenAI, PromptTemplate
import requests
import os
import streamlit as st

load_dotenv(find_dotenv())
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")


# Image to text: caption the image with the BLIP captioning model
def img2text(url):
    image_to_text = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

    text = image_to_text(url)[0]["generated_text"]

    print(text)
    return text


# LLM: turn the image caption into a short story
def generate_story(scenario):
    template = """
    You are a story teller;
    You can generate a short story based on a simple narrative, the story should be more than 20 words;

    CONTEXT: {scenario}
    STORY:
    """

    prompt = PromptTemplate(template=template, input_variables=["scenario"])

    story_llm = LLMChain(
        llm=OpenAI(model_name="gpt-3.5-turbo", temperature=1),
        prompt=prompt,
        verbose=True,
    )

    story = story_llm.predict(scenario=scenario)

    print(story)
    return story


# Text to speech: send the story to the Hugging Face Inference API and save the audio
def text2speech(message):
    API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"}
    payloads = {"inputs": message}

    response = requests.post(API_URL, headers=headers, json=payloads)
    with open("audio.flac", "wb") as file:
        file.write(response.content)


def main():
    st.set_page_config(page_title="img 2 Audio story", page_icon="🤖")

    st.header("Turn img into an audio story")
    uploaded_file = st.file_uploader("Choose an image...", type="jpg")

    if uploaded_file is not None:
        print(uploaded_file)
        # Save the upload to disk so the captioning pipeline can read it by filename
        with open(uploaded_file.name, "wb") as file:
            file.write(uploaded_file.getvalue())
        st.image(uploaded_file, caption="Uploaded Image.", use_column_width=True)

        scenario = img2text(uploaded_file.name)
        story = generate_story(scenario)
        text2speech(story)

        with st.expander("Scenario"):
            st.write(scenario)
        with st.expander("Story"):
            st.write(story)

        st.audio("audio.flac")


if __name__ == "__main__":
    main()
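

# Usage notes (a minimal sketch of the assumed setup; names below are not defined in the script):
# Expected .env entries -- HUGGINGFACE_API_TOKEN must match the key read above, and
# OPENAI_API_KEY is picked up implicitly by LangChain's OpenAI wrapper:
#   HUGGINGFACE_API_TOKEN=hf_...
#   OPENAI_API_KEY=sk-...
# Assuming the script is saved as app.py, launch it with Streamlit's CLI:
#   streamlit run app.py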