# Uncomment the next two lines to load environment variables from a local .env file during development.
# from dotenv import load_dotenv
# load_dotenv()
import io
import os
import warnings
from PIL import Image
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
import gradio as gr
STABILITY_KEY = os.environ.get("STABILITY_KEY")
STABILITY_HOST = os.environ.get("STABILITY_HOST")
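
# Optional sanity check (an addition, not in the original script): fail fast with a clear
# message if the API key is missing, rather than failing later inside a gRPC call.
# Note that STABILITY_HOST is read above but never passed to the client below, so the
# SDK's default gRPC host is used unless host=... is supplied explicitly.
if not STABILITY_KEY:
    raise RuntimeError("The STABILITY_KEY environment variable must be set.")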
# Set up our connection to the API.
stability_api = client.StabilityInference(
    key=STABILITY_KEY,  # API key reference.
    verbose=True,  # Print debug messages.
    engine="stable-diffusion-v1-5",  # Engine to use for generation; for SD 2.0 use one of the v2-0 engines below.
    # Available engines: stable-diffusion-v1, stable-diffusion-v1-5, stable-diffusion-512-v2-0,
    # stable-diffusion-768-v2-0, stable-inpainting-v1-0, stable-inpainting-512-v2-0
)
prompt = "clear portrait of a superhero concept from avengers, supercool!!, background hyper detailed, character concept, full body, dynamic pose, intricate, highly detailed, digital painting, artstation, concept art, smooth, sharp focus, illustration, beautiful face"
def img2img(prompt, img):
    """Transform the uploaded image with Stable Diffusion, guided by the text prompt."""
    # Resize the initial image before sending it to the API.
    img = img.resize((768, 512))
    # Set up our initial generation parameters.
    answers = stability_api.generate(
        prompt=prompt,
        init_image=img,      # Use the uploaded image as the initial image for transformation.
        start_schedule=0.6,  # Strength of the prompt relative to the initial image.
        seed=123467458,      # Initial images benefit from their own distinct seed rather than
                             # reusing the seed of the original image generation.
        steps=30,            # Number of inference steps performed. Defaults to 30.
        cfg_scale=8.0,       # How strongly the generation is guided to match the prompt.
                             # Higher values follow the prompt more closely. Defaults to 7.0.
        width=512,           # Generation width, defaults to 512 if not included.
        height=512,          # Generation height, defaults to 512 if not included.
        samples=1,           # Number of images to generate.
        # sampler=generation.SAMPLER_K_DPMPP_2M  # Sampler used to denoise the generation.
        # Defaults to k_dpmpp_2m if not specified. CLIP guidance only supports ancestral samplers.
        # Available samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2,
        # k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m
    )
    # Warn on the console if the adult content classifier is tripped;
    # otherwise return the generated image for Gradio to display.
    for resp in answers:
        for artifact in resp.artifacts:
            if artifact.finish_reason == generation.FILTER:
                warnings.warn(
                    "Your request activated the API's safety filters and could not be processed. "
                    "Please modify the prompt and try again.")
            if artifact.type == generation.ARTIFACT_IMAGE:
                img2 = Image.open(io.BytesIO(artifact.binary))
                return img2
    # If the safety filter blocked every artifact, return None, which Gradio renders as an empty output.
    return None
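
# Illustrative direct call (assumes "input.png" is any local image; not part of the original
# script), useful for testing the pipeline without launching the Gradio UI:
#   result = img2img(prompt, Image.open("input.png").convert("RGB"))
#   result.save("hero.png")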
# Build the Gradio UI: an editable prompt box and an image upload in, the generated image out.
gr.Interface(fn=img2img, inputs=[gr.Text(value=prompt), gr.Image(type="pil")],
             outputs=gr.Image(type="pil")).launch(debug=True)