import base64
import io
import os

import streamlit as st
import torch
from dotenv import load_dotenv
from PIL import Image
#import openai
from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionXLPipeline,
    EulerAncestralDiscreteScheduler,
)
def get_base64(bin_file):
    # Read a binary file and return its contents as a base64 string
    # (used by the commented-out set_background helper below).
    with open(bin_file, 'rb') as f:
        data = f.read()
    return base64.b64encode(data).decode()
# def set_background(png_file):
#     bin_str = get_base64(png_file)
#     page_bg_img = '''
#     <style>
#     .stApp {
#     background-image: url("data:image/png;base64,%s");
#     background-size: cover;
#     }
#     </style>
#     ''' % bin_str
#     st.markdown(page_bg_img, unsafe_allow_html=True)
# set_background('/home/ubuntu/AI-Image-Generation-Streamlit-App/8L0A9954.png')
#load_dotenv()
#openai.api_key = os.getenv("OPENAI_API_KEY")

# Function to generate AI-based images using OpenAI DALL-E
# def generate_images_using_openai(text):
#     response = openai.Image.create(prompt=text, n=1, size="512x512")
#     image_url = response['data'][0]['url']
#     return image_url

# Function to generate AI-based images using Hugging Face Diffusers
# def generate_images_using_huggingface_diffusers(text):
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     prompt = text
#     image = pipe(prompt).images[0]
#     return image
def Animegen(text):
    # Generate a 1024x1024 anime-style image from a text prompt with the
    # Linaqruf/animagine-xl SDXL model.
    model = "Linaqruf/animagine-xl"
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16"
    )
    # Replace the default scheduler with Euler Ancestral sampling
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")
    prompt = text
    image = pipe(
        prompt,
        width=1024,
        height=1024,
        guidance_scale=12,
        target_size=(1024, 1024),
        original_size=(4096, 4096),
        num_inference_steps=50
    ).images[0]
    return image
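
# Note: Animegen reloads the SDXL weights on every call, i.e. on every button press.
# A possible refactor (a sketch only, assuming the installed Streamlit provides the
# st.cache_resource decorator; load_animagine_pipeline is a hypothetical helper name)
# would cache the loaded pipeline across reruns:
#
# @st.cache_resource
# def load_animagine_pipeline():
#     pipe = StableDiffusionXLPipeline.from_pretrained(
#         "Linaqruf/animagine-xl",
#         torch_dtype=torch.float16,
#         use_safetensors=True,
#         variant="fp16"
#     )
#     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
#     return pipe.to("cuda")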
# def Avatars(text):
#     model_id = "riccardogiorato/avatar-diffusion"
#     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     prompt = text
#     image = pipe(prompt).images[0]
#     return image

# Streamlit UI
choice = st.sidebar.selectbox("Select your choice", ["Home", "ImageGenerator"])

if choice == "Home":
    st.title("WoxPix")
    with st.expander("About the App"):
        st.write("AI-generated images are produced using artificial intelligence methods such as Generative Adversarial Networks (GANs) or similar algorithms. These images are not taken from the internet or captured with cameras; they are created by AI models that analyze patterns in existing data and generate novel images resembling the patterns they have learned.")
        st.write("Keep in mind that AI-generated visuals may not always correspond to your exact vision, and you may need to experiment with different prompts to achieve the ideal result. Furthermore, the quality and realism of AI-generated graphics can differ depending on the platform and the intricacy of your request.")
if choice == "ImageGenerator":
st.subheader("Image Generator")
input_prompt = st.text_input("Enter your text prompt")
# if input_prompt is not None:
# if st.button("Generate Image"):
# image_url = Animegen(input_prompt)
# st.image(image_url, caption="Generated Animegen")
if input_prompt is not None:
if st.button("Generate Image"):
image_output = Animegen(input_prompt)
st.info("Generating image.....")
st.success("Image Generated Successfully")
st.image(image_output, caption="Generated by AI Research Centre - Woxsen University")
buffered = io.BytesIO()
image_output.save(buffered, format="PNG")
st.download_button(
label="Download Image",
data=buffered.getvalue(),
file_name="generated_image.png",
mime="image/png"
)
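            # Optional (sketch, assuming the installed Streamlit provides st.spinner):
            # wrapping the call in a spinner keeps the progress message visible for
            # exactly as long as the pipeline runs, e.g.
            #     with st.spinner("Generating image..."):
            #         image_output = Animegen(input_prompt)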
# st.button("Download Generated Image")
# image = Image.open(image_output)
# img_byte_array = image.getvalue()
# st.download_button(
# label="Click here to download the generated image as JPEG",
# data=img_byte_array,
# file_name="generated_image.jpeg",
# mime="image/jpeg",
# )
# elif choice == "Avatars":
# st.subheader("Avatars")
# input_prompt = st.text_input("Enter your text prompt")
# if input_prompt is not None:
# if st.button("Generate Image"):
# image_url = Avatars(input_prompt)
# st.image(image_url, caption="Generated Avatar")
# elif choice == "Huggingface Diffusers":
# st.subheader("Innovative Image Generation")
# input_prompt = st.text_input("Enter your text prompt")
# if input_prompt is not None:
# if st.button("Generate Image"):
# image_output = generate_images_using_huggingface_diffusers(input_prompt)
# st.info("Generating image.....")
# st.success("Image Generated Successfully")
# st.image(image_output, caption="Generated by Huggingface Diffusers")
# model_id = "riccardogiorato/avatar-diffusion"
# pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
# pipe = pipe.to("cuda")
# prompt = "a magical witch with blue hair with avatartwow style"
# image = pipe(prompt).images[0]
# image.save("./magical_witch.png")
small_image_width = 200
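
# Rough dependency sketch inferred from the imports above (assumed, not taken from a
# pinned requirements file): streamlit, python-dotenv, torch, diffusers, Pillow, and,
# for the fp16/safetensors SDXL checkpoint, most likely transformers, safetensors and
# accelerate as well.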