alok94 commited on
Commit
fb96795
1 Parent(s): cc4cd9f

genai first commit

Browse files
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Generative Ai
3
- emoji: 🏢
4
- colorFrom: green
5
- colorTo: blue
6
  sdk: streamlit
7
  sdk_version: 1.30.0
8
  app_file: app.py
 
1
  ---
2
+ title: Text To Video
3
+ emoji: 🐠
4
+ colorFrom: purple
5
+ colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.30.0
8
  app_file: app.py
app.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from src.page1 import page1
from src.page2 import page2
from src.page3 import page3
from src.page4 import page4
from src.page5 import page5
from src.page6 import page6
from src.page7 import page7
from src.page8 import page8
# import openai
import os
from dotenv import load_dotenv

# Pull API tokens and other settings from the local .env file into os.environ.
load_dotenv()
st.set_page_config(layout="wide")


# openai.api_key = os.getenv("OPENAI_API_KEY")


# Sidebar label -> render callable. The retired DALL-E demo pages are kept
# here, commented out, so they can be re-enabled without hunting for them.
pages = {
    # "Entry point": page1,
    # "Text to image": page2,
    # "Image variation": page3,
    # "Image edit": page4,
    "Text to Image": page5,
    "Image to Text": page6,
    "Image to Video": page7,
    "Text to Video": page8,
}

# Navigation: offer every page name in the sidebar...
page = st.sidebar.selectbox("Select a page", list(pages.keys()))

# ...then render whichever one the user picked.
pages[page]()
src/.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # SECURITY: live credentials were committed here and must be treated as leaked —
+ # revoke and rotate both keys, then supply fresh values via deployment secrets.
+ OPENAI_API_KEY="<redacted-set-via-environment>"
2
+ REPLICATE_API_TOKEN='<redacted-set-via-environment>'
src/__pycache__/page1.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
src/__pycache__/page2.cpython-310.pyc ADDED
Binary file (1.09 kB). View file
 
src/__pycache__/page3.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
src/__pycache__/page4.cpython-310.pyc ADDED
Binary file (1.33 kB). View file
 
src/__pycache__/page5.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
src/__pycache__/page6.cpython-310.pyc ADDED
Binary file (2.89 kB). View file
 
src/__pycache__/page7.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
src/__pycache__/page8.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
src/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.59 kB). View file
 
src/page1.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# Static copy for the introductory page. NOTE: this is rendered verbatim by
# st.markdown, so the text (including the footnote superscripts) is kept as-is.
introduction = """
Brief details about OpenAI DALL-E:

### What is OpenAI DALL-E?

DALL-E is a 12-billion parameter version of GPT-3 trained to generate images from text
descriptions, using a dataset of text–image pairs². It can create original, realistic
images and art from a text description¹.

### What can OpenAI DALL-E do?

DALL-E has a diverse set of capabilities, including creating anthropomorphized versions
of animals and objects, combining unrelated concepts in plausible ways, rendering text,
and applying transformations to existing images². DALL-E 2 can make realistic edits to
existing images from a natural language caption³.

### How does OpenAI DALL-E work?
DALL-E works by using a dataset of text–image pairs to learn how to generate images
from textual descriptions². It uses a 12-billion parameter version of GPT-3².

### How can I use OpenAI DALL-E?
DALL-E is available in beta and can be used by signing up on their website⁵.

```
Example usage:
Input: A cat made of sushi
Output: An image of a cat made out of sushi
```

Source: Conversation with Bing, 4/29/2023

- (1) DALL·E: Creating images from text - OpenAI. https://openai.com/research/dall-e.
- (2) DALL·E 2 - OpenAI. https://openai.com/product/dall-e-2.
- (3) DALL·E - OpenAI. https://labs.openai.com/.
- (4) DALL·E now available in beta - OpenAI. https://openai.com/blog/dall-e-now-available-in-beta/.
- (5) DALL·E 2 - openai.com. https://openai.com/product/dall-e-2?ref=promptsreport.com.
- (6) OpenAI API. https://platform.openai.com/docs/models/dall-e.

"""


def page1():
    """Render the introductory DALL-E information page."""
    st.title("OpenAI DALL·E")
    st.markdown(introduction)
src/page2.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
import replicate


def page2():
    """Text-to-image page: generate images from a text prompt.

    Reads a prompt, an output size, and an image count from a form, then runs
    Stable Diffusion on Replicate (the same model src/page5.py uses) and shows
    every generated image.

    BUG FIX: the previous handler called a *video* model
    (stable-video-diffusion) with a hard-coded "https://example.com/..."
    placeholder, ignored all three form values, and only printed the result to
    the console — nothing was ever displayed to the user.
    """
    st.title("OpenAI DALL·E Image Generation")
    st.info("""#### NOTE: you can download image by \
right clicking on the image and select save image as option""")

    with st.form(key='form'):
        prompt = st.text_input(label='Enter text prompt for image generation')
        size = st.selectbox('Select size of the images',
                            ('256x256', '512x512', '1024x1024'))
        num_images = st.selectbox('Enter number of images to be generated', (1, 2, 3, 4))
        submit_button = st.form_submit_button(label='Submit')

    if submit_button:
        if prompt:
            # size is '<width>x<height>', e.g. '512x512'.
            width, height = (int(v) for v in size.split("x"))
            with st.spinner("Generating Image"):
                outputs = replicate.run(
                    "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
                    input={
                        "width": width,
                        "height": height,
                        "prompt": prompt,
                        "scheduler": "K_EULER",
                        "num_outputs": num_images,
                        "guidance_scale": 7.5,
                        "num_inference_steps": 50
                    }
                )

            for idx, image_url in enumerate(outputs):
                st.image(image_url, caption=f"Generated image: {idx+1}",
                         use_column_width=True)
src/page3.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
from PIL import Image
from src.utils import get_width_height, resize_image


def page3():
    """Image-variation page: upload a picture and prepare it for variation."""
    st.title("OpenAI DALL·E Image Variation")
    st.info("""#### NOTE: you can download image by \
right clicking on the image and select save image as option""")

    with st.form(key='form'):
        uploaded_file = st.file_uploader("Choose an image file", type=['png', 'jpg'])
        size = st.selectbox('Select size of the images',
                            ('256x256', '512x512', '1024x1024'))
        num_images = st.selectbox('Enter number of images to be generated', (1, 2, 3, 4))
        submit_button = st.form_submit_button(label='Submit')

    # Nothing to do until the form was submitted with a file attached.
    if submit_button and uploaded_file is not None:
        picture = Image.open(uploaded_file)
        st.image(picture, caption="Uploaded image", use_column_width=True)

        # Rescale the upload to the requested size; resize_image returns PNG bytes.
        target_width, target_height = get_width_height(size)
        picture = resize_image(picture, target_width, target_height)

        # The OpenAI variation call itself (openai.Image.create_variation with
        # `picture`, `num_images`, `size`) is currently disabled in this build.
src/page4.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
from PIL import Image
# BUG FIX: this import was commented out although get_width_height and
# resize_image are called below (page3.py imports it the same way).
from src.utils import get_width_height, resize_image


def page4():
    """Image-editing page: upload an image plus a mask and preview the inputs.

    BUG FIX: the handler called get_width_height, resize_image and rembg's
    remove() while all of their imports were commented out, so any submit
    raised NameError. src.utils is now imported normally; rembg stays an
    optional dependency and is imported lazily with a clear error message.
    """
    st.title("OpenAI DALL·E Image Editing")
    st.info("""#### NOTE: you can download image by \
right clicking on the image and select save image as option""")

    with st.form(key='form'):
        uploaded_file = st.file_uploader("Choose an image file", type=['png', 'jpg'])
        mask_file = st.file_uploader("Choose an mask file", type=['png', 'jpg'])
        prompt = st.text_input("Enter a text prompt")
        size = st.selectbox('Select size of the images', ('256x256', '512x512', '1024x1024'))
        num_images = st.selectbox('Enter number of images to be generated', (1, 2, 3, 4))
        submit_button = st.form_submit_button(label='Submit')

    if submit_button:
        if (uploaded_file is not None) and (mask_file is not None) and prompt:
            our_image = Image.open(uploaded_file)
            mask_image = Image.open(mask_file)

            width, height = get_width_height(size)

            # resize_image returns PNG-encoded bytes at the requested size.
            our_image = resize_image(our_image, width, height)
            mask_image = resize_image(mask_image, width, height)

            st.image(our_image, caption="Uploaded image", use_column_width=True)
            st.image(mask_image, caption="Uploaded mask", use_column_width=True)

            # rembg is optional; fail with a readable message instead of NameError.
            try:
                from rembg import remove
            except ImportError:
                st.error("The 'rembg' package is required for mask background "
                         "removal but is not installed.")
                return

            backround_removed_mask = remove(mask_image)

            st.image(backround_removed_mask, caption="backround_removed_mask",
                     use_column_width=True)

            # The OpenAI edit call itself (openai.Image.create_edit with the
            # image, mask, prompt, num_images and size) is disabled in this build.
51
+
src/page5.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
from streamlit_extras.stylable_container import stylable_container

import replicate

# Page-wide CSS: hide Streamlit chrome, shrink videos, recolor buttons.
streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
video{width:200px;}
.css-1wbqy5l {visibility: hidden;}
.css-15zrgzn {visibility: hidden;}
.css-klqnuk {visibility: hidden;}
.en6cib64 {visibility: hidden;}
.css-1u4fkce {visibility: hidden;}
.en6cib62 {visibility: hidden;}
.css-19rxjzo, .ef3psqc11 {
background-color: purple;
text-color: white;
}
div.stButton > button:first-child {
background-color: red;
text-weight: bold;
}
</style>
"""


def page5():
    """Text-to-Image page: generate images from a prompt via Stable Diffusion."""
    # st.title("Text to Image")
    with stylable_container(
        key="title",
        css_styles=[
            """ span {
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            st-emotion-cache-0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            .e1f1d6gn0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }
            """,
        ],
    ):
        # BUG FIX: the closing tag was "<h3>" (unclosed heading); use "</h3>"
        # so the markup is well-formed, matching pages 6-8.
        st.markdown("<h3>Text to Image</h3>", unsafe_allow_html=True)
        st.markdown(streamlit_style, unsafe_allow_html=True)

    # st.info("""#### NOTE: you can download image by \
    # right clicking on the image and select save image as option""")
    with st.form(key='form'):
        prompt = st.text_input(label='Enter text prompt for image generation')
        placeholder = st.empty()
        col1, col2 = placeholder.columns(2)
        number_of_image = col1.number_input("Number of Images to Generate", step=1, min_value=1, max_value=8, value=1, placeholder="Type a number...")
        submit_button = st.form_submit_button(label='Generate Image')

    if submit_button:
        if prompt and number_of_image:
            with st.spinner("Generating Image"):
                outputs = replicate.run(
                    "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
                    input={
                        "width": 768,
                        "height": 448,
                        "prompt": prompt,
                        "scheduler": "K_EULER",
                        "num_outputs": number_of_image,
                        "guidance_scale": 7.5,
                        "num_inference_steps": 50
                    }
                )

            # Lay the results out in two columns, alternating left/right.
            placeholder = st.empty()
            col1, col2 = placeholder.columns(2)
            for index, output in enumerate(outputs):
                if index % 2 == 0:
                    col1.image(output)
                else:
                    col2.image(output)
src/page6.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
import replicate
import os
from dotenv import load_dotenv
from streamlit_extras.stylable_container import stylable_container
import streamlit_extras

# SECURITY FIX: a live Replicate API token was hard-coded here (it is now
# leaked and must be revoked). Load it from the environment / .env instead.
load_dotenv()
REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)

# Page-wide CSS: hide Streamlit chrome, shrink videos, recolor buttons.
streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
video{width:200px;}
.css-1wbqy5l {visibility: hidden;}
.css-15zrgzn {visibility: hidden;}
.css-klqnuk {visibility: hidden;}
.en6cib64 {visibility: hidden;}
.css-1u4fkce {visibility: hidden;}
.en6cib62 {visibility: hidden;}
.css-19rxjzo, .ef3psqc11 {
background-color: purple;
text-color: white;
}
div.stButton > button:first-child {
background-color: darkgreen;
text-weight: bold;
}

</style>
"""


def page6():
    """Image-to-Text page: upload an image and ask a question about it."""
    with stylable_container(
        key="title",
        css_styles=[
            """ span {
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            st-emotion-cache-0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            .e1f1d6gn0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }
            """,
        ],
    ):
        st.markdown("<h3>Image to Text</h3>", unsafe_allow_html=True)  # This is under a css style
        st.markdown(streamlit_style, unsafe_allow_html=True)

    image_file = st.file_uploader("Select Image", type=['jpeg', 'jpg', 'png'])

    if image_file is not None:
        placeholder = st.empty()
        col1, col2 = placeholder.columns(2)
        col1.text("Uploaded Image")
        col1.image(image_file)
        prompt = st.text_input(label='Ask question related to image')

        # TYPO FIX: the button label previously read "Requestion Answer".
        submit_button = st.button(label='Request Answer')

        if submit_button:
            if prompt and (image_file is not None):
                with st.spinner("Recognizing Image...."):
                    output = replicate.run(
                        "nateraw/video-llava:a494250c04691c458f57f2f8ef5785f25bc851e0c91fd349995081d4362322dd", input={
                            "image_path": image_file,
                            "text_prompt": prompt
                        }
                    )
                print(output)

                col2.text("Response")
                col2.markdown(output)
101
+
102
+
src/page7.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
import replicate
import os
from dotenv import load_dotenv
from streamlit_extras.stylable_container import stylable_container
import streamlit_extras

# SECURITY FIX: a live Replicate API token was hard-coded here (it is now
# leaked and must be revoked). Load it from the environment / .env instead.
load_dotenv()
REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)

# Page-wide CSS: hide Streamlit chrome, shrink videos, recolor buttons.
streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
video{width:200px;}
.css-1wbqy5l {visibility: hidden;}
.css-15zrgzn {visibility: hidden;}
.css-klqnuk {visibility: hidden;}
.en6cib64 {visibility: hidden;}
.css-1u4fkce {visibility: hidden;}
.en6cib62 {visibility: hidden;}
.css-19rxjzo, .ef3psqc11 {
background-color: purple;
text-color: white;
}
div.stButton > button:first-child {
background-color: darkgreen;
text-weight: bold;
}

</style>
"""


def page7():
    """Image-to-Video page: animate an uploaded image guided by a prompt."""
    with stylable_container(
        key="title",
        css_styles=[
            """ span {
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            st-emotion-cache-0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            .e1f1d6gn0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }
            """,
        ],
    ):
        st.markdown("<h3>Image to Video</h3>", unsafe_allow_html=True)  # This is under a css style
        st.markdown(streamlit_style, unsafe_allow_html=True)

    image_file = st.file_uploader("Select Image", type=['jpeg', 'jpg', 'png'])
    if image_file is not None:
        placeholder = st.empty()
        col1, col2 = placeholder.columns(2)
        col1.text("Uploaded Image")
        col1.image(image_file)
        prompt = st.text_input(label='Enter text prompt for Video generation')
        submit_button = st.button(label='Generate Video')

        if submit_button:
            if prompt and (image_file is not None):

                with st.spinner("Generating Video. It may require few minutes so please wait...."):
                    output = replicate.run(
                        "ali-vilab/i2vgen-xl:5821a338d00033abaaba89080a17eb8783d9a17ed710a6b4246a18e0900ccad4",
                        input={
                            "image": image_file,
                            "prompt": prompt,
                            "max_frames": 25,
                            "guidance_scale": 9,
                            "num_inference_steps": 50
                        }
                    )

                col2.text("Generated Video from Image")
                col2.video(output)
                # Make the rendered <video> element loop and autoplay.
                st.markdown(
                    """
                    <script>
                    const video = document.querySelector('video');
                    video.loop = true;
                    video.autoplay = true;
                    </script>
                    """,
                    unsafe_allow_html=True,
                )
109
+
110
+
src/page8.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
# import openai
import replicate
import os
from dotenv import load_dotenv
from streamlit_extras.stylable_container import stylable_container
import streamlit_extras

# SECURITY FIX: a live Replicate API token was hard-coded here (it is now
# leaked and must be revoked). Load it from the environment / .env instead.
load_dotenv()
REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)

# Page-wide CSS: hide Streamlit chrome, shrink videos, recolor buttons.
streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
video{width:200px;}
.css-1wbqy5l {visibility: hidden;}
.css-15zrgzn {visibility: hidden;}
.css-klqnuk {visibility: hidden;}
.en6cib64 {visibility: hidden;}
.css-1u4fkce {visibility: hidden;}
.en6cib62 {visibility: hidden;}
.css-19rxjzo, .ef3psqc11 {
background-color: purple;
text-color: white;
}
div.stButton > button:first-child {
background-color: darkgreen;
text-weight: bold;
}

</style>
"""


def page8():
    """Text-to-Video page: generate a short video clip from a text prompt."""
    with stylable_container(
        key="title",
        css_styles=[
            """ span {
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            st-emotion-cache-0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }""",
            """
            .e1f1d6gn0{
            text-align: center;
            padding-top: 0px;
            padding-right: 0px;
            padding-bottom: 0px;
            padding-left: 0px;
            }
            """,
        ],
    ):
        st.markdown("<h3>Text to video</h3>", unsafe_allow_html=True)  # This is under a css style
        st.markdown(streamlit_style, unsafe_allow_html=True)

    with st.form(key='form'):
        prompt = st.text_input(label='Enter text prompt for Video generation')
        placeholder = st.empty()
        col1, col2 = placeholder.columns(2)
        video_length = col1.number_input("Enter video length (seconds)", step=1, min_value=1, max_value=8, value=4, placeholder="Type a number...")
        submit_button = st.form_submit_button(label='Generate Video')

    if submit_button:
        if prompt:
            placeholder = st.empty()
            col1, col2 = placeholder.columns(2)
            with st.spinner("Generating Video from Text. Please wait...."):
                output = replicate.run(
                    "cjwbw/text2video-zero:e671ffe4e976c0ec813f15a9836ebcfd08857ac2669af6917e3c2549307f9fae",
                    input={
                        "fps": 4,
                        "prompt": prompt,
                        "model_name": "dreamlike-art/dreamlike-photoreal-2.0",
                        "timestep_t0": 44,
                        "timestep_t1": 47,
                        "video_length": video_length,
                        "negative_prompt": "",
                        "motion_field_strength_x": 12,
                        "motion_field_strength_y": 12
                    }
                )

            col1.text("Generated Video")
            col1.video(output)
            # Make the rendered <video> element loop and autoplay.
            st.markdown(
                """
                <script>
                const video = document.querySelector('video');
                video.loop = true;
                video.autoplay = true;
                </script>
                """,
                unsafe_allow_html=True,
            )
112
+
113
+
src/utils.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from io import BytesIO
from PIL.PngImagePlugin import PngImageFile
from PIL.JpegImagePlugin import JpegImageFile
from typing import Union, List

# Images we accept: PNG or JPEG objects as loaded by PIL.
ImageLike = Union[PngImageFile, JpegImageFile]


def resize_image(image: ImageLike, width: int, height: int) -> bytes:
    """Resize *image* to (width, height) and return it as PNG-encoded bytes.

    Args:
        image (ImageLike): input image of type PngImageFile or JpegImageFile
        width (int): target width in pixels
        height (int): target height in pixels

    Returns:
        bytes: the resized picture serialized in PNG format
    """
    scaled = image.resize((width, height))
    buffer = BytesIO()
    scaled.save(buffer, format='PNG')
    return buffer.getvalue()
23
+
24
+
25
def get_width_height(size: str) -> List[int]:
    """Parse a size string of the form '<width>x<height>' into [width, height].

    Example: '512x512' -> [512, 512].

    Args:
        size (str): size described as '<width>x<height>', e.g. '512x512'

    Returns:
        List[int]: a list of integers as [width, height] extracted from the
        given size
    """
    # e.g. size = '512x512' -> [512, 512]
    return [int(val) for val in size.split("x")]