# genpixel/app.py
import streamlit as st
from PIL import Image
import requests
from io import BytesIO
import base64
# import tensorflow as tf
from diffusers import StableDiffusionPipeline
import torch
#model_id = "CompVis/stable-diffusion-v1-4"
#pipe = StableDiffusionPipeline.from_pretrained(model_id)
# Ensure the model is using the CPU
#pipe = pipe.to("cpu")
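# Note: CPU inference for Stable Diffusion is slow and memory-hungry. A hedged sketch for a
# memory-constrained CPU setup, kept commented out (assumes a diffusers version that
# provides enable_attention_slicing):
# pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float32)
# pipe.enable_attention_slicing()
# pipe = pipe.to("cpu")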
image_html = ""
# Function to display an example image
def display_example_image(url):
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    st.image(img, caption='Generated Image', use_column_width=True)
# Function to generate AI-based images using Hugging Face Diffusers
def generate_images_using_huggingface_diffusers(text):
    # pipe = StableDiffusionPipeline.from_pretrained("sd-dreambooth-library/cat-toy", torch_dtype=torch.float16)
    pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float32)
    # Use the GPU when one is available; otherwise fall back to the CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = pipe.to(device)
    prompt = text
    # The pipeline returns an output object; .images holds the list of PIL images
    return pipe(prompt, num_images_per_prompt=3).images
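
# A minimal, optional sketch of caching the pipeline so the model is not re-loaded on
# every button click. This is not part of the original app: it assumes a Streamlit
# version that provides st.cache_resource (1.18+), and load_pipeline is an illustrative
# helper name.
#
# @st.cache_resource
# def load_pipeline(model_id="CompVis/stable-diffusion-v1-4"):
#     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
#     return pipe.to("cuda" if torch.cuda.is_available() else "cpu")
#
# generate_images_using_huggingface_diffusers could then reuse load_pipeline() instead of
# constructing a new pipeline on every call.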
# Placeholder function for generating images (replace this with your actual generative AI code)
def generate_images(prompt, num_images=3):
    # This is a placeholder: it fetches a single example image and returns it
    # num_images times for demonstration purposes.
    # A real implementation would call the diffusers pipeline instead, e.g.:
    # images = pipe(prompt, num_images_per_prompt=num_images, num_inference_steps=50, guidance_scale=7.5).images
    image_url = ""  # Replace with a valid image URL
    response = requests.get(image_url)
    img = Image.open(BytesIO(response.content))
    return [img] * num_images
title_center = """
<style>
.title{
text-align: center
}
</style>
"""
# Title of the app
st.markdown(title_center, unsafe_allow_html=True)
title_container = """
<h1 class="title">AutoFloor</h1>
"""
st.markdown(title_container, unsafe_allow_html=True)
# Text input for user prompt
user_prompt = st.text_input("Enter your prompt here:")
# file = st.file_uploader("Upload an image file", type=["jpg", "png"])
# model = tf.keras.models.load_model('L00005_HL512_bagus.h5')
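# CSS workaround: the selector below targets the button rendered immediately after the
# hidden #button-after marker span and centers it on the page.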
st.markdown("""
<style>.element-container:has(#button-after) + div button {
margin: 0 auto;
display: block;
}</style>""", unsafe_allow_html=True)
st.markdown('<span id="button-after"></span>', unsafe_allow_html=True)
# Generate the images and display each one in a zoomable viewer
if st.button('Generate Images', type="primary"):
    if user_prompt:
        st.write(f"Prompt: {user_prompt}")
        # image_url = "https://wpmedia.roomsketcher.com/content/uploads/2022/01/06145940/What-is-a-floor-plan-with-dimensions.png"  # Replace with a valid image URL
        # Generate images based on the user's prompt
        # generated_images = generate_images(user_prompt, num_images=3)
        st.info("Generating images...")
        image_output = generate_images_using_huggingface_diffusers(user_prompt)
        st.success("Images generated successfully")
        st.image(image_output, caption=["Generated by Huggingface Diffusers"] * len(image_output))

        # Encode each generated PIL image as a base64 data URI so it can be
        # embedded directly in the HTML pan/zoom viewer below.
        image_sources = []
        for img in image_output:
            buffer = BytesIO()
            img.save(buffer, format="PNG")
            image_sources.append("data:image/png;base64," + base64.b64encode(buffer.getvalue()).decode())

        # Build the pan/zoom viewer HTML for a single image source
        def zoomable_image_html(image_src):
            return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
.zoomable-image {{
transition: transform 0.3s ease;
cursor: pointer;
}}
</style>
<script src="https://unpkg.com/panzoom@9.4.3/dist/panzoom.min.js"></script>
</head>
<body>
<div id="image-container" style="text-align: center;">
<img id="zoomable-image" class="zoomable-image" src="{image_src}" alt="Zoomable Image" style="max-width: 100%; height: auto;">
</div>
<script>
document.addEventListener('DOMContentLoaded', (event) => {{
const image = document.getElementById('zoomable-image');
const panzoomInstance = panzoom(image, {{
maxZoom: 3,
minZoom: 1,
bounds: false,
boundsPadding: 0.1
}});
image.addEventListener('click', () => {{
const currentTransform = image.style.transform;
if (currentTransform.includes('matrix')) {{
panzoomInstance.zoomAbs(0, 0, 1);
}} else {{
panzoomInstance.zoomAbs(image.width / 2, image.height / 2, 3);
}}
}});
image.addEventListener('dblclick', () => {{
const xys = panzoomInstance.getTransform()
if(xys.scale > 1) {{
const fScale = 1 - xys.scale
const fixeX = xys.x / fScale
const fixeY = xys.y / fScale
panzoomInstance.smoothZoomAbs(fixeX, fixeY, 1)
}} else {{
panzoomInstance.moveBy(-xys.x, -xys.y, true)
panzoomInstance.smoothZoomAbs(xys.x, xys.y, 1)
}}
panzoomInstance.moveTo(0, 0)
panzoomInstance.zoomAbs(0, 0, 1)
}});
}});
</script>
</body>
</html>
"""
# # Embed the HTML and JavaScript into the Streamlit app
# st.components.v1.html(html_code, height=300)
# Display images in a 3x3 grid
cols = st.columns(3)
for i in range(3):
# for j in range(3):
# with cols[j]:
# st.image(generated_images[i * 3 + j], use_column_width=True)
# Display the image with zoom effect
# container_style = """
# <div class="container">
# <img class="zoom" src="{}" style="width:100%;">
# </div>
# """.format(image_url)
# st.markdown(container_style, unsafe_allow_html=True)
st.components.v1.html(html_code, height=600)
else:
st.write("Please enter a prompt.")