alok94 committed on
Commit
561acc5
1 Parent(s): 30dd14f

adding project

README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Generative AI
- emoji:
- colorFrom: blue
- colorTo: green
+ title: Generative_ai
+ emoji: 🐠
+ colorFrom: purple
+ colorTo: purple
  sdk: streamlit
- sdk_version: 1.31.0
+ sdk_version: 1.30.0
  app_file: app.py
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,39 @@
+ import streamlit as st
+ import replicate
+ from src.page1 import page1
+ from src.page2 import page2
+ from src.page3 import page3
+ from src.page4 import page4
+ from src.page5 import page5
+ from src.page6 import page6
+ from src.page7 import page7
+ from src.page8 import page8
+ # import openai
+ import os
+ from dotenv import load_dotenv
+ load_dotenv()
+ REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
+ replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
+ st.set_page_config(layout="wide")
+
+
+ # openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+ pages = {
+     # "Entry point": page1,
+     # "Text to image": page2,
+     # "Image variation": page3,
+     # "Image edit": page4,
+     "Text to Image": page5,
+     "Image to Text": page6,
+     "Image to Video": page7,
+     "Text to Video": page8
+
+ }
+
+ # Create the selectbox in the sidebar
+ page = st.sidebar.selectbox("Select a page", list(pages.keys()))
+
+ # Display the selected page
+ pages[page]()
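app.py expects `REPLICATE_API_TOKEN` to be available before any page calls `replicate.run`. A minimal sketch of that setup in isolation, assuming a local `.env` file next to app.py (the file name and the check below are illustrative, not part of the commit); with the token in place the Space runs via `streamlit run app.py`:

```python
# Minimal sketch: confirm the token app.py loads via python-dotenv is present.
# Assumes a .env file containing REPLICATE_API_TOKEN=<your token>.
import os

import replicate
from dotenv import load_dotenv

load_dotenv()
token = os.environ.get("REPLICATE_API_TOKEN")
if not token:
    raise SystemExit("REPLICATE_API_TOKEN is not set; add it to .env or the environment.")

client = replicate.Client(api_token=token)  # same client construction as app.py
print("Replicate client configured")
```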
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ replicate
+ streamlit
+ python-dotenv
+ streamlit_extras
+
+
src/__pycache__/page1.cpython-310.pyc ADDED
Binary file (1.89 kB)
src/__pycache__/page1.cpython-311.pyc ADDED
Binary file (2.04 kB)
src/__pycache__/page2.cpython-310.pyc ADDED
Binary file (1.09 kB)
src/__pycache__/page2.cpython-311.pyc ADDED
Binary file (1.73 kB)
src/__pycache__/page3.cpython-310.pyc ADDED
Binary file (1.12 kB)
src/__pycache__/page3.cpython-311.pyc ADDED
Binary file (1.86 kB)
src/__pycache__/page4.cpython-310.pyc ADDED
Binary file (1.33 kB)
src/__pycache__/page4.cpython-311.pyc ADDED
Binary file (2.4 kB)
src/__pycache__/page5.cpython-310.pyc ADDED
Binary file (2.91 kB)
src/__pycache__/page5.cpython-311.pyc ADDED
Binary file (4.61 kB)
src/__pycache__/page6.cpython-310.pyc ADDED
Binary file (2.89 kB)
src/__pycache__/page6.cpython-311.pyc ADDED
Binary file (4.16 kB)
src/__pycache__/page7.cpython-310.pyc ADDED
Binary file (3.3 kB)
src/__pycache__/page7.cpython-311.pyc ADDED
Binary file (4.59 kB)
src/__pycache__/page8.cpython-310.pyc ADDED
Binary file (3.5 kB)
src/__pycache__/page8.cpython-311.pyc ADDED
Binary file (4.76 kB)
src/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.59 kB)
src/__pycache__/utils.cpython-311.pyc ADDED
Binary file (2.08 kB)
src/page1.py ADDED
@@ -0,0 +1,46 @@
+ import streamlit as st
+
+ introduction = """
+ Brief details about OpenAI DALL-E:
+
+ ### What is OpenAI DALL-E?
+
+ DALL-E is a 12-billion parameter version of GPT-3 trained to generate images from text
+ descriptions, using a dataset of text–image pairs². It can create original, realistic
+ images and art from a text description¹.
+
+ ### What can OpenAI DALL-E do?
+
+ DALL-E has a diverse set of capabilities, including creating anthropomorphized versions
+ of animals and objects, combining unrelated concepts in plausible ways, rendering text,
+ and applying transformations to existing images². DALL-E 2 can make realistic edits to
+ existing images from a natural language caption³.
+
+ ### How does OpenAI DALL-E work?
+ DALL-E works by using a dataset of text–image pairs to learn how to generate images
+ from textual descriptions². It uses a 12-billion parameter version of GPT-3².
+
+ ### How can I use OpenAI DALL-E?
+ DALL-E is available in beta and can be used by signing up on their website⁵.
+
+ ```
+ Example usage:
+ Input: A cat made of sushi
+ Output: An image of a cat made out of sushi
+ ```
+
+ Source: Conversation with Bing, 4/29/2023
+
+ - (1) DALL·E: Creating images from text - OpenAI. https://openai.com/research/dall-e.
+ - (2) DALL·E 2 - OpenAI. https://openai.com/product/dall-e-2.
+ - (3) DALL·E - OpenAI. https://labs.openai.com/.
+ - (4) DALL·E now available in beta - OpenAI. https://openai.com/blog/dall-e-now-available-in-beta/.
+ - (5) DALL·E 2 - openai.com. https://openai.com/product/dall-e-2?ref=promptsreport.com.
+ - (6) OpenAI API. https://platform.openai.com/docs/models/dall-e.
+
+ """
+
+ # Define the pages
+ def page1():
+     st.title("OpenAI DALL·E")
+     st.markdown(introduction)
src/page2.py ADDED
@@ -0,0 +1,37 @@
+ import streamlit as st
+ # import openai
+ import replicate
+
+ def page2():
+     st.title("OpenAI DALL·E Image Generation")
+     st.info("""#### NOTE: you can download an image by \
+     right-clicking on it and selecting the 'Save image as' option""")
+
+     with st.form(key='form'):
+         prompt = st.text_input(label='Enter text prompt for image generation')
+         size = st.selectbox('Select size of the images',
+                             ('256x256', '512x512', '1024x1024'))
+         num_images = st.selectbox('Enter number of images to be generated', (1, 2, 3, 4))
+         submit_button = st.form_submit_button(label='Submit')
+
+     if submit_button:
+         if prompt:
+             output = replicate.run(
+                 "stability-ai/stable-video-diffusion:3f0457e4619daac51203dedb472816fd4af51f3149fa7a9e0b5ffcf1b8172438",
+                 input={
+                     "input_image": "https://example.com/path/to/file/input_image"
+                 }
+             )
+             print(output)  # placeholder: the result is only printed, nothing is rendered yet
+
+             # response = openai.Image.create(
+             #     prompt=prompt,
+             #     n=num_images,
+             #     size=size,
+             # )
+
+             # for idx in range(num_images):
+             #     image_url = response["data"][idx]["url"]
+
+             #     st.image(image_url, caption=f"Generated image: {idx+1}",
+             #              use_column_width=True)
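As committed, page2 only prints the Replicate output and renders nothing in the page. A small sketch of how a list of generated image URLs could be displayed instead, following the pattern the commented-out DALL-E code above and page5 use (the helper name is illustrative, not part of the commit):

```python
import streamlit as st

def show_generated_images(image_urls, prompt: str) -> None:
    # Render each generated image with a numbered caption at full column width.
    for idx, url in enumerate(image_urls):
        st.image(url, caption=f"Generated image: {idx + 1}", use_column_width=True)
```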
src/page3.py ADDED
@@ -0,0 +1,38 @@
+ import streamlit as st
+ # import openai
+ from PIL import Image
+ from src.utils import get_width_height, resize_image
+
+ def page3():
+     st.title("OpenAI DALL·E Image Variation")
+     st.info("""#### NOTE: you can download an image by \
+     right-clicking on it and selecting the 'Save image as' option""")
+
+     with st.form(key='form'):
+         uploaded_file = st.file_uploader("Choose an image file", type=['png', 'jpg'])
+         size = st.selectbox('Select size of the images',
+                             ('256x256', '512x512', '1024x1024'))
+         num_images = st.selectbox('Enter number of images to be generated', (1, 2, 3, 4))
+         submit_button = st.form_submit_button(label='Submit')
+
+     if submit_button:
+         if uploaded_file is not None:
+
+             image = Image.open(uploaded_file)
+
+             st.image(image, caption="Uploaded image", use_column_width=True)
+
+             width, height = get_width_height(size)
+             image = resize_image(image, width, height)
+
+             # response = openai.Image.create_variation(
+             #     image=image,
+             #     n=num_images,
+             #     size=size,
+             # )
+
+             # for idx in range(num_images):
+             #     image_url = response["data"][idx]["url"]
+
+             #     st.image(image_url, caption=f"Generated image: {idx+1}",
+             #              use_column_width=True)
src/page4.py ADDED
@@ -0,0 +1,51 @@
+ import streamlit as st
+ # import openai
+ from PIL import Image
+ from src.utils import get_width_height, resize_image
+ from rembg import remove  # note: rembg (and Pillow) are not listed in requirements.txt
+
+ def page4():
+     st.title("OpenAI DALL·E Image Editing")
+     st.info("""#### NOTE: you can download an image by \
+     right-clicking on it and selecting the 'Save image as' option""")
+
+     with st.form(key='form'):
+         uploaded_file = st.file_uploader("Choose an image file", type=['png', 'jpg'])
+         mask_file = st.file_uploader("Choose a mask file", type=['png', 'jpg'])
+         prompt = st.text_input("Enter a text prompt")
+         size = st.selectbox('Select size of the images', ('256x256', '512x512', '1024x1024'))
+         num_images = st.selectbox('Enter number of images to be generated', (1, 2, 3, 4))
+         submit_button = st.form_submit_button(label='Submit')
+
+     if submit_button:
+         if (uploaded_file is not None) and (mask_file is not None) and prompt:
+             our_image = Image.open(uploaded_file)
+             mask_image = Image.open(mask_file)
+
+             width, height = get_width_height(size)
+
+             our_image = resize_image(our_image, width, height)
+             mask_image = resize_image(mask_image, width, height)
+
+             st.image(our_image, caption="Uploaded image", use_column_width=True)
+             st.image(mask_image, caption="Uploaded mask", use_column_width=True)
+
+             background_removed_mask = remove(mask_image)
+
+             st.image(background_removed_mask, caption="Background-removed mask",
+                      use_column_width=True)
+
+             # response = openai.Image.create_edit(
+             #     image=our_image,
+             #     mask=background_removed_mask,
+             #     prompt=prompt,
+             #     n=num_images,
+             #     size=size
+             # )
+
+             # for idx in range(num_images):
+             #     image_url = response["data"][idx]["url"]
+
+             #     st.image(image_url, caption=f"Generated image: {idx+1}",
+             #              use_column_width=True)
+
src/page5.py ADDED
@@ -0,0 +1,106 @@
+ import streamlit as st
+ # import openai
+ from streamlit_extras.stylable_container import stylable_container
+ from dotenv import load_dotenv
+ import replicate
+ import os
+ load_dotenv()
+ REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
+ replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
+ streamlit_style = """
+     <style>
+     #MainMenu {visibility: hidden;}
+     footer {visibility: hidden;}
+     video {width: 200px;}
+     .css-1wbqy5l {visibility: hidden;}
+     .css-15zrgzn {visibility: hidden;}
+     .css-klqnuk {visibility: hidden;}
+     .en6cib64 {visibility: hidden;}
+     .css-1u4fkce {visibility: hidden;}
+     .en6cib62 {visibility: hidden;}
+     .css-19rxjzo, .ef3psqc11 {
+         background-color: purple;
+         color: white;
+     }
+     div.stButton > button:first-child {
+         background-color: red;
+         font-weight: bold;
+     }
+     </style>
+     """
+ def page5():
+     # st.title("Text to Image")
+     with stylable_container(
+         key="title",
+         css_styles=[
+             """ span {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             st-emotion-cache-0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             .e1f1d6gn0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }
+             """,
+         ],
+     ):
+         st.markdown("<h3>Text to Image</h3>", unsafe_allow_html=True)
+     st.markdown(streamlit_style, unsafe_allow_html=True)
+
+     # st.info("""#### NOTE: you can download image by \
+     # right clicking on the image and select save image as option""")
+     with st.form(key='form'):
+         prompt = st.text_input(label='Enter text prompt for image generation')
+         placeholder = st.empty()
+         col1, col2 = placeholder.columns(2)
+         number_of_image = col1.number_input("Number of Images to Generate", step=1, min_value=1, max_value=8, value=1, placeholder="Type a number...")
+         # size = st.selectbox('Select size of the images',
+         #                     ('256x256', '512x512', '1024x1024'))
+         # num_images = st.selectbox('Enter number of images to be generated', (1,2,3,4))
+
+         submit_button = st.form_submit_button(label='Generate Image')
+
+     if submit_button:
+         if prompt and number_of_image:
+             with st.spinner("Generating Image"):
+                 outputs = replicate.run(
+                     "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
+                     input={
+                         "width": 768,
+                         "height": 448,
+                         "prompt": prompt,
+                         "scheduler": "K_EULER",
+                         "num_outputs": number_of_image,
+                         "guidance_scale": 7.5,
+                         "num_inference_steps": 50
+                     }
+                 )
+
+             # for output in outputs:
+             #     st.image(output, caption=prompt)
+             placeholder = st.empty()
+             col1, col2 = placeholder.columns(2)
+             for index, output in enumerate(outputs):
+                 if index % 2 == 0:
+                     col1.image(output, caption=prompt)
+                 else:
+                     col2.image(output, caption=prompt)
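The alternating col1/col2 loop at the end of page5 generalizes to any number of columns; a short sketch of the same round-robin idea as a reusable helper (the function name and column count are illustrative, not part of the commit):

```python
import streamlit as st

def show_in_columns(images, caption: str, num_cols: int = 2) -> None:
    # Distribute images round-robin across num_cols Streamlit columns,
    # exactly as page5 does by hand for two columns.
    cols = st.columns(num_cols)
    for index, image in enumerate(images):
        cols[index % num_cols].image(image, caption=caption)
```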
src/page6.py ADDED
@@ -0,0 +1,101 @@
+ import streamlit as st
+ # import openai
+ import replicate
+ import os
+ from dotenv import load_dotenv
+ from streamlit_extras.stylable_container import stylable_container
+ import streamlit_extras
+ load_dotenv()
+ REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
+ replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
+
+ streamlit_style = """
+     <style>
+     #MainMenu {visibility: hidden;}
+     footer {visibility: hidden;}
+     video {width: 200px;}
+     .css-1wbqy5l {visibility: hidden;}
+     .css-15zrgzn {visibility: hidden;}
+     .css-klqnuk {visibility: hidden;}
+     .en6cib64 {visibility: hidden;}
+     .css-1u4fkce {visibility: hidden;}
+     .en6cib62 {visibility: hidden;}
+     .css-19rxjzo, .ef3psqc11 {
+         background-color: purple;
+         color: white;
+     }
+     div.stButton > button:first-child {
+         background-color: darkgreen;
+         font-weight: bold;
+     }
+     </style>
+     """
+ def page6():
+     with stylable_container(
+         key="title",
+         css_styles=[
+             """ span {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             st-emotion-cache-0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             .e1f1d6gn0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }
+             """,
+         ],
+     ):
+         st.markdown("<h3>Image to Text</h3>", unsafe_allow_html=True)  # This is under a css style
+     st.markdown(streamlit_style, unsafe_allow_html=True)
+
+     image_file = st.file_uploader("Select Image", type=['jpeg', 'jpg', 'png'])
+
+     if image_file is not None:
+         placeholder = st.empty()
+         col1, col2 = placeholder.columns(2)
+         col1.text("Uploaded Image")
+         col1.image(image_file)
+         prompt = st.text_input(label='Ask question related to image')
+
+         submit_button = st.button(label='Request Answer')
+
+         if submit_button:
+             if prompt and (image_file is not None):
+                 with st.spinner("Recognizing Image...."):
+                     output = replicate.run(
+                         "nateraw/video-llava:a494250c04691c458f57f2f8ef5785f25bc851e0c91fd349995081d4362322dd",
+                         input={
+                             "image_path": image_file,
+                             "text_prompt": prompt
+                         }
+                     )
+                 print(output)
+
+                 col2.text("Response")
+                 col2.markdown(output)
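For reference, the same video-llava call can be exercised outside Streamlit. A sketch assuming REPLICATE_API_TOKEN is already set in the environment and using an illustrative local image path (in page6 the Streamlit UploadedFile is passed instead of an open file):

```python
import replicate

# Illustrative local file; replace with any JPEG/PNG on disk.
with open("example.jpg", "rb") as image_file:
    answer = replicate.run(
        "nateraw/video-llava:a494250c04691c458f57f2f8ef5785f25bc851e0c91fd349995081d4362322dd",
        input={"image_path": image_file, "text_prompt": "What is in this picture?"},
    )
print(answer)
```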
src/page7.py ADDED
@@ -0,0 +1,109 @@
+ import streamlit as st
+ # import openai
+ import replicate
+ import os
+ from dotenv import load_dotenv
+ from streamlit_extras.stylable_container import stylable_container
+ import streamlit_extras
+ load_dotenv()
+ REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
+ replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
+
+ streamlit_style = """
+     <style>
+     #MainMenu {visibility: hidden;}
+     footer {visibility: hidden;}
+     video {width: 200px;}
+     .css-1wbqy5l {visibility: hidden;}
+     .css-15zrgzn {visibility: hidden;}
+     .css-klqnuk {visibility: hidden;}
+     .en6cib64 {visibility: hidden;}
+     .css-1u4fkce {visibility: hidden;}
+     .en6cib62 {visibility: hidden;}
+     .css-19rxjzo, .ef3psqc11 {
+         background-color: purple;
+         color: white;
+     }
+     div.stButton > button:first-child {
+         background-color: darkgreen;
+         font-weight: bold;
+     }
+     </style>
+     """
+ def page7():
+     with stylable_container(
+         key="title",
+         css_styles=[
+             """ span {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             st-emotion-cache-0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             .e1f1d6gn0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }
+             """,
+         ],
+     ):
+         st.markdown("<h3>Image to Video</h3>", unsafe_allow_html=True)  # This is under a css style
+     st.markdown(streamlit_style, unsafe_allow_html=True)
+
+     image_file = st.file_uploader("Select Image", type=['jpeg', 'jpg', 'png'])
+     if image_file is not None:
+         placeholder = st.empty()
+         col1, col2 = placeholder.columns(2)
+         col1.text("Uploaded Image")
+         col1.image(image_file)
+         prompt = st.text_input(label='Enter text prompt for Video generation')
+         submit_button = st.button(label='Generate Video')
+
+         if submit_button:
+             if prompt and (image_file is not None):
+
+                 with st.spinner("Generating Video. This may take a few minutes, so please wait...."):
+                     output = replicate.run(
+                         "ali-vilab/i2vgen-xl:5821a338d00033abaaba89080a17eb8783d9a17ed710a6b4246a18e0900ccad4",
+                         input={
+                             "image": image_file,
+                             "prompt": prompt,
+                             "max_frames": 25,
+                             "guidance_scale": 9,
+                             "num_inference_steps": 50
+                         }
+                     )
+
+                 col2.text("Generated Video from Image")
+                 col2.video(output)
+                 st.markdown(
+                     """
+                     <script>
+                     const video = document.querySelector('video');
+                     video.loop = true;
+                     video.autoplay = true;
+                     </script>
+                     """,
+                     unsafe_allow_html=True,
+                 )
src/page8.py ADDED
@@ -0,0 +1,112 @@
+ import streamlit as st
+ # import openai
+ import replicate
+ import os
+ from dotenv import load_dotenv
+ from streamlit_extras.stylable_container import stylable_container
+ import streamlit_extras
+ # load_dotenv()
+ # REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
+ # replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
+
+ streamlit_style = """
+     <style>
+     #MainMenu {visibility: hidden;}
+     footer {visibility: hidden;}
+     video {width: 200px;}
+     .css-1wbqy5l {visibility: hidden;}
+     .css-15zrgzn {visibility: hidden;}
+     .css-klqnuk {visibility: hidden;}
+     .en6cib64 {visibility: hidden;}
+     .css-1u4fkce {visibility: hidden;}
+     .en6cib62 {visibility: hidden;}
+     .css-19rxjzo, .ef3psqc11 {
+         background-color: purple;
+         color: white;
+     }
+     div.stButton > button:first-child {
+         background-color: darkgreen;
+         font-weight: bold;
+     }
+     </style>
+     """
+ def page8():
+     with stylable_container(
+         key="title",
+         css_styles=[
+             """ span {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             st-emotion-cache-0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }""",
+             """
+             .e1f1d6gn0 {
+                 text-align: center;
+                 padding-top: 0px;
+                 padding-right: 0px;
+                 padding-bottom: 0px;
+                 padding-left: 0px;
+             }
+             """,
+         ],
+     ):
+         st.markdown("<h3>Text to Video</h3>", unsafe_allow_html=True)  # This is under a css style
+     st.markdown(streamlit_style, unsafe_allow_html=True)
+
+     with st.form(key='form'):
+         prompt = st.text_input(label='Enter text prompt for Video generation')
+         placeholder = st.empty()
+         col1, col2 = placeholder.columns(2)
+         video_length = col1.number_input("Enter video length (seconds)", step=1, min_value=1, max_value=8, value=4, placeholder="Type a number...")
+         submit_button = st.form_submit_button(label='Generate Video')
+
+     if submit_button:
+         if prompt:
+             placeholder = st.empty()
+             col1, col2 = placeholder.columns(2)
+             with st.spinner("Generating Video from Text. Please wait...."):
+                 output = replicate.run(
+                     "cjwbw/text2video-zero:e671ffe4e976c0ec813f15a9836ebcfd08857ac2669af6917e3c2549307f9fae",
+                     input={
+                         "fps": 4,
+                         "prompt": prompt,
+                         "model_name": "dreamlike-art/dreamlike-photoreal-2.0",
+                         "timestep_t0": 44,
+                         "timestep_t1": 47,
+                         "video_length": video_length,
+                         "negative_prompt": "",
+                         "motion_field_strength_x": 12,
+                         "motion_field_strength_y": 12
+                     }
+                 )
+
+             col1.text("Generated Video")
+             col1.video(output)
+             st.markdown(
+                 """
+                 <script>
+                 const video = document.querySelector('video');
+                 video.loop = true;
+                 video.autoplay = true;
+                 </script>
+                 """,
+                 unsafe_allow_html=True,
+             )
src/utils.py ADDED
@@ -0,0 +1,37 @@
+ from io import BytesIO
+ from PIL.PngImagePlugin import PngImageFile
+ from PIL.JpegImagePlugin import JpegImageFile
+ from typing import Union, List
+
+ ImageLike = Union[PngImageFile, JpegImageFile]
+
+ def resize_image(image: ImageLike, width: int, height: int) -> bytes:
+     """Resize an image to the given width and height.
+
+     Args:
+         image (ImageLike): input image to resize, of type PngImageFile or JpegImageFile
+         width (int): expected width of the image
+         height (int): expected height of the image
+
+     Returns:
+         bytes: a new image with the given width and height, in PNG format
+     """
+     image = image.resize((width, height))
+     bytes_stream = BytesIO()
+     image.save(bytes_stream, format='PNG')
+     return bytes_stream.getvalue()
+
+
+ def get_width_height(size: str) -> List[int]:
+     """Get the width and height of an image from a size string, for example
+     size = '512x512'.
+
+     Args:
+         size (str): size described as '<width>x<height>', e.g. '512x512'
+
+     Returns:
+         List[int]: a list of integers as [width, height] extracted from the
+         given size
+     """
+     # size = '512x512'
+     return [int(val) for val in size.split("x")]  # [512, 512]
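A quick usage sketch of these helpers (the image path is illustrative, not part of the commit):

```python
from PIL import Image

from src.utils import get_width_height, resize_image

width, height = get_width_height('512x512')    # -> [512, 512]

image = Image.open('example.png')               # any PNG/JPEG opened with Pillow
png_bytes = resize_image(image, width, height)  # PNG-encoded bytes at 512x512
print(f"{len(png_bytes)} bytes of PNG data")
```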