Commit 76f94b4 • hatmanstack committed • 1 Parent(s): db4aa1d

first

Files changed:
- README.md +35 -8
- app.py +177 -0
- requirements.txt +3 -0
README.md
CHANGED
@@ -1,13 +1,40 @@
----
 title: AWS Nova Canvas
-emoji:
-colorFrom:
-colorTo:
+emoji: 🖼️
+colorFrom: blue
+colorTo: red
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.6.0
 app_file: app.py
 pinned: false
-
-
+license: apache2.0
+short_description: Generate image variations
+
+
+# Amazon Nova Canvas Image Generation
+
+This Gradio application demonstrates various image generation capabilities using the Amazon Nova Canvas model. The application provides multiple functionalities, each accessible through its own tab, allowing users to generate and manipulate images based on text prompts and other inputs.
+
+## Features
+
+- **Text to Image**: Generate an image from a text prompt using the Amazon Nova Canvas model.
+- **Inpainting**: Modify specific areas of an image based on a text prompt.
+- **Outpainting**: Extend an image beyond its original borders using a mask and text prompt.
+- **Image Variation**: Create variations of an image based on a text description.
+- **Image Conditioning**: Generate an image conditioned on an input image and a text prompt.
+- **Color Guided Content**: Generate an image using a color palette from a reference image and a text prompt.
+- **Background Removal**: Remove the background from an image.
+
+## Usage
+
+1. **Text to Image**: Enter a descriptive text prompt and click "Generate" to create an image.
+2. **Inpainting**: Upload an image, provide a mask prompt, and click "Generate" to modify specific areas.
+3. **Outpainting**: Upload an image and a mask image, provide a text prompt, and click "Generate" to extend the image.
+4. **Image Variation**: Upload an image, provide a text description, and click "Generate" to create variations.
+5. **Image Conditioning**: Upload an image, provide a text prompt, and click "Generate" to condition the image.
+6. **Color Guided Content**: Upload an image, provide a text prompt and color palette, and click "Generate" to guide content generation.
+7. **Background Removal**: Upload an image and click "Generate" to remove the background.
+
+## Excellent Documentation
+
+<p>For more information, visit <a href="https://docs.aws.amazon.com/nova/latest/userguide/what-is-nova.html">AWS Nova documentation</a>.</p>
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
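Every tab described above follows the same request pattern against Amazon Bedrock: build a JSON body for one of the Nova Canvas task types, call invoke_model, and decode the base64 image in the response. The sketch below shows that flow for the Text to Image case; it is illustrative only, assumes AWS credentials and the us-east-1 region are already configured for boto3, and mirrors what the committed app.py does in its handlers.

    import base64
    import io
    import json
    import boto3
    from PIL import Image

    # Minimal Nova Canvas call (illustrative sketch, assuming default boto3 credentials).
    bedrock = boto3.client("bedrock-runtime", region_name="us-east-1")

    body = json.dumps({
        "taskType": "TEXT_IMAGE",  # other tabs swap in INPAINTING, OUTPAINTING, etc.
        "textToImageParams": {"text": "A watercolor lighthouse at dusk"},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 1024, "width": 1024, "cfgScale": 8.0, "seed": 0},
    })

    response = bedrock.invoke_model(
        body=body,
        modelId="amazon.nova-canvas-v1:0",
        accept="application/json",
        contentType="application/json",
    )
    payload = json.loads(response["body"].read())

    # The response carries the generated image as a base64 string.
    image = Image.open(io.BytesIO(base64.b64decode(payload["images"][0])))
    image.save("preview.png")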
app.py
ADDED
@@ -0,0 +1,177 @@
import base64
import io
import json
import logging
import os

import boto3
from PIL import Image
import gradio as gr

# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Custom exception for image errors
class ImageError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message

model_id = 'amazon.nova-canvas-v1:0'

# AWS credentials are read from the environment (e.g. Space secrets). The
# committed code referenced aws_id / aws_secret without defining them; the
# variable names used here are assumptions.
aws_id = os.environ.get('AWS_ID')
aws_secret = os.environ.get('AWS_SECRET')

# Function to generate an image using the Amazon Nova Canvas model
def generate_image(body):
    logger.info("Generating image with Amazon Nova Canvas model %s", model_id)
    session = boto3.Session(aws_access_key_id=aws_id, aws_secret_access_key=aws_secret, region_name='us-east-1')
    bedrock = session.client('bedrock-runtime')
    accept = "application/json"
    content_type = "application/json"

    response = bedrock.invoke_model(body=body, modelId=model_id, accept=accept, contentType=content_type)
    response_body = json.loads(response.get("body").read())

    # Surface any reported error before attempting to decode the image
    finish_reason = response_body.get("error")
    if finish_reason is not None:
        raise ImageError(f"Image generation error. Error is {finish_reason}")

    base64_image = response_body.get("images")[0]
    base64_bytes = base64_image.encode('ascii')
    image_bytes = base64.b64decode(base64_bytes)

    logger.info("Successfully generated image with Amazon Nova Canvas model %s", model_id)
    return image_bytes

# Function to display an image from bytes
def display_image(image_bytes):
    image = Image.open(io.BytesIO(image_bytes))
    return image

# Encode a PIL image (as supplied by gr.Image(type='pil')) to base64 for the API
def pil_to_base64(image):
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode('utf8')

# Gradio functions for each task
def text_to_image(prompt):
    body = json.dumps({
        "taskType": "TEXT_IMAGE",
        "textToImageParams": {"text": prompt},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 1024, "width": 1024, "cfgScale": 8.0, "seed": 0}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

def inpainting(image, mask_prompt):
    input_image = pil_to_base64(image)
    body = json.dumps({
        "taskType": "INPAINTING",
        # maskPrompt selects the region to repaint; the committed code hard-coded
        # "windows" here, so the single UI prompt is used for both fields instead.
        "inPaintingParams": {"text": mask_prompt, "image": input_image, "maskPrompt": mask_prompt},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 512, "width": 512, "cfgScale": 8.0}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

def outpainting(image, mask_image, text):
    input_image = pil_to_base64(image)
    input_mask_image = pil_to_base64(mask_image)
    body = json.dumps({
        "taskType": "OUTPAINTING",
        "outPaintingParams": {"text": text, "image": input_image, "maskImage": input_mask_image, "outPaintingMode": "DEFAULT"},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 512, "width": 512, "cfgScale": 8.0}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

def image_variation(image, text):
    input_image = pil_to_base64(image)
    body = json.dumps({
        "taskType": "IMAGE_VARIATION",
        "imageVariationParams": {"text": text, "images": [input_image], "similarityStrength": 0.7},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 512, "width": 512, "cfgScale": 8.0}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

def image_conditioning(image, text):
    input_image = pil_to_base64(image)
    body = json.dumps({
        "taskType": "TEXT_IMAGE",
        "textToImageParams": {"text": text, "conditionImage": input_image, "controlMode": "CANNY_EDGE"},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 512, "width": 512, "cfgScale": 8.0}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

def color_guided_content(image, text, colors):
    input_image = pil_to_base64(image)
    # The API expects a list of hex color strings; the UI supplies a comma-separated string
    color_list = [c.strip() for c in colors.split(',') if c.strip()]
    body = json.dumps({
        "taskType": "COLOR_GUIDED_GENERATION",
        "colorGuidedGenerationParams": {"text": text, "referenceImage": input_image, "colors": color_list},
        "imageGenerationConfig": {"numberOfImages": 1, "height": 512, "width": 512, "cfgScale": 8.0}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

def background_removal(image):
    input_image = pil_to_base64(image)
    body = json.dumps({
        "taskType": "BACKGROUND_REMOVAL",
        "backgroundRemovalParams": {"image": input_image}
    })
    image_bytes = generate_image(body)
    return display_image(image_bytes)

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# Amazon Nova Canvas Image Generation")

    with gr.Tab("Text to Image"):
        with gr.Column():
            gr.Markdown("Generate an image from a text prompt using the Amazon Nova Canvas model.")
            prompt = gr.Textbox(label="Prompt")
            output = gr.Image()
            gr.Button("Generate").click(text_to_image, inputs=prompt, outputs=output)

    with gr.Tab("Inpainting"):
        with gr.Column():
            gr.Markdown("Use inpainting to modify specific areas of an image based on a text prompt.")
            image = gr.Image(type='pil', label="Input Image")
            mask_prompt = gr.Textbox(label="Mask Prompt")
            output = gr.Image()
            gr.Button("Generate").click(inpainting, inputs=[image, mask_prompt], outputs=output)

    with gr.Tab("Outpainting"):
        with gr.Column():
            gr.Markdown("Extend an image beyond its original borders using a mask and text prompt.")
            image = gr.Image(type='pil', label="Input Image")
            mask_image = gr.Image(type='pil', label="Mask Image")
            text = gr.Textbox(label="Text")
            output = gr.Image()
            gr.Button("Generate").click(outpainting, inputs=[image, mask_image, text], outputs=output)

    with gr.Tab("Image Variation"):
        with gr.Column():
            gr.Markdown("Create variations of an image based on a text description.")
            image = gr.Image(type='pil', label="Input Image")
            text = gr.Textbox(label="Text")
            output = gr.Image()
            gr.Button("Generate").click(image_variation, inputs=[image, text], outputs=output)

    with gr.Tab("Image Conditioning"):
        with gr.Column():
            gr.Markdown("Generate an image conditioned on an input image and a text prompt.")
            image = gr.Image(type='pil', label="Input Image")
            text = gr.Textbox(label="Text")
            output = gr.Image()
            gr.Button("Generate").click(image_conditioning, inputs=[image, text], outputs=output)

    with gr.Tab("Color Guided Content"):
        with gr.Column():
            gr.Markdown("Generate an image using a color palette from a reference image and a text prompt.")
            image = gr.Image(type='pil', label="Input Image")
            text = gr.Textbox(label="Text")
            colors = gr.Textbox(label="Colors (comma-separated hex values)")
            output = gr.Image()
            gr.Button("Generate").click(color_guided_content, inputs=[image, text, colors], outputs=output)

    with gr.Tab("Background Removal"):
        with gr.Column():
            gr.Markdown("Remove the background from an image.")
            image = gr.Image(type='pil', label="Input Image")
            output = gr.Image()
            gr.Button("Generate").click(background_removal, inputs=image, outputs=output)

if __name__ == "__main__":
    demo.launch()
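A quick way to exercise these handlers outside the Gradio UI is to import them directly. The snippet below is an illustrative smoke test, not part of the commit; it assumes app.py is on the import path and that the AWS credentials it references are available in the environment.

    # Local smoke test for the Text to Image handler (illustrative only).
    from app import text_to_image

    result = text_to_image("A red bicycle leaning against a brick wall")
    result.save("sample.png")   # handlers return a PIL.Image
    print(result.size)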
requirements.txt
ADDED
@@ -0,0 +1,3 @@
spaces
gradio
boto3
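These three packages cover the UI (gradio), the Bedrock client (boto3), and the Hugging Face Spaces helper package (spaces). To try the app outside of Spaces, running `pip install -r requirements.txt` followed by `python app.py` should be sufficient, provided the AWS credentials referenced in app.py are set in the environment.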