# generative_ai / src / page7.py
# (upload metadata: author alok94, commit 4baf6f4 — "made changes in environ")
# Page dependencies; the openai import was disabled by the original author.
import streamlit as st
# import openai
import replicate
import os
from dotenv import load_dotenv
from streamlit_extras.stylable_container import stylable_container
import streamlit_extras
# Load variables from a local .env file into the process environment.
load_dotenv()
REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")  # None if unset
# NOTE(review): this rebinds the `replicate` MODULE name to a Client instance,
# shadowing the import above; the later `replicate.run(...)` call therefore
# goes through this client. Consider renaming to `replicate_client` (would
# require updating page7() in the same change).
replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
# Global CSS injected via st.markdown(..., unsafe_allow_html=True): hides the
# Streamlit main menu/footer and several version-specific chrome elements,
# shrinks videos, and restyles buttons.
# NOTE(review): the .css-* / .e* class names are Streamlit-internal,
# auto-generated names and may break on a Streamlit upgrade — verify.
# Fixed: `text-color` and `text-weight` are not CSS properties; the correct
# properties are `color` and `font-weight`, so the original rules were
# silently ignored by the browser.
streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
video{width:200px;}
.css-1wbqy5l {visibility: hidden;}
.css-15zrgzn {visibility: hidden;}
.css-klqnuk {visibility: hidden;}
.en6cib64 {visibility: hidden;}
.css-1u4fkce {visibility: hidden;}
.en6cib62 {visibility: hidden;}
.css-19rxjzo, .ef3psqc11 {
background-color: purple;
color: white;
}
div.stButton > button:first-child {
background-color: darkgreen;
font-weight: bold;
}
</style>
"""
def page7():
    """Render the "Image to Video" page.

    Workflow: the user uploads an image (jpeg/jpg/png) and enters a text
    prompt; on submit, the pinned ali-vilab/i2vgen-xl model is run through
    the module-level Replicate client and the generated video is displayed
    beside the uploaded image.
    """
    # Centered title. The three css_styles entries target different wrappers
    # Streamlit may render the heading in (a span, the emotion-cache root,
    # and a version-specific container class).
    with stylable_container(
        key="title",
        css_styles=[
            """ span {
                text-align: center;
                padding-top: 0px;
                padding-right: 0px;
                padding-bottom: 0px;
                padding-left: 0px;
            }""",
            """
            st-emotion-cache-0{
                text-align: center;
                padding-top: 0px;
                padding-right: 0px;
                padding-bottom: 0px;
                padding-left: 0px;
            }""",
            """
            .e1f1d6gn0{
                text-align: center;
                padding-top: 0px;
                padding-right: 0px;
                padding-bottom: 0px;
                padding-left: 0px;
            }
            """,
        ],
    ):
        st.markdown("<h3>Image to Video</h3>", unsafe_allow_html=True)

    # Apply the module-level global CSS (hides chrome, styles buttons).
    st.markdown(streamlit_style, unsafe_allow_html=True)

    image_file = st.file_uploader("Select Image", type=['jpeg', 'jpg', 'png'])
    if image_file is not None:
        # Two columns: the uploaded image on the left, the result on the right.
        placeholder = st.empty()
        col1, col2 = placeholder.columns(2)
        col1.text("Uploaded Image")
        col1.image(image_file)

    prompt = st.text_input(label='Enter text prompt for Video generation')
    submit_button = st.button(label='Generate Video')

    if submit_button:
        if prompt and (image_file is not None):
            # Fixed grammar in the user-facing spinner message.
            with st.spinner("Generating Video. It may take a few minutes, so please wait...."):
                # Pinned version hash of ali-vilab/i2vgen-xl on Replicate.
                output = replicate.run(
                    "ali-vilab/i2vgen-xl:5821a338d00033abaaba89080a17eb8783d9a17ed710a6b4246a18e0900ccad4",
                    input={
                        "image": image_file,
                        "prompt": prompt,
                        "max_frames": 25,
                        "guidance_scale": 9,
                        "num_inference_steps": 50,
                    },
                )
            # col2 exists here: this branch requires image_file is not None,
            # which is exactly the condition that created the columns above.
            col2.text("Generated Video from Image")
            col2.video(output)
            # NOTE(review): Streamlit's HTML sanitizer does not execute
            # injected <script> tags, so this loop/autoplay snippet is very
            # likely a no-op — confirm, or use st.video playback options.
            st.markdown(
                """
<script>
const video = document.querySelector('video');
video.loop = true;
video.autoplay = true;
</script>
""",
                unsafe_allow_html=True,
            )
        else:
            # Previously this branch silently did nothing; tell the user what
            # is missing instead.
            st.warning("Please upload an image and enter a text prompt before generating.")