import os
import streamlit as st
import tensorflow as tf
import numpy as np
from huggingface_hub import HfApi, hf_hub_download
from PIL import Image
from io import BytesIO
import requests
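# Packages assumed by the imports above; a hypothetical requirements.txt for this
# Space (exact pins are not specified in the original source):
#
#   streamlit
#   tensorflow
#   numpy
#   huggingface_hub
#   Pillow
#   requests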
# Hugging Face credentials
api = HfApi()

# Set your Hugging Face username and model repository name
username = "Hammad712"
repo_name = "CycleGAN-Model"
repo_id = f"{username}/{repo_name}"

# Download the model files from Hugging Face
local_dir = "CycleGAN"  # Relative path inside the Space
os.makedirs(local_dir, exist_ok=True)
for file in api.list_repo_files(repo_id=repo_id, repo_type="model"):
    hf_hub_download(repo_id=repo_id, filename=file, local_dir=local_dir)
# Load the model
# Note: the saved model references an InstanceNormalization layer; the base Layer class
# below is only a placeholder. Swap in the implementation the model was trained with
# (e.g. tensorflow_addons.layers.InstanceNormalization) if loading fails.
custom_objects = {'InstanceNormalization': tf.keras.layers.Layer}
loaded_model = tf.keras.models.load_model(local_dir, custom_objects=custom_objects)
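# Note: Streamlit re-runs this whole script on every interaction, so the download and
# load above repeat on each rerun. A minimal sketch of how the model could be cached
# instead (assuming Streamlit >= 1.18, which provides st.cache_resource):
#
# @st.cache_resource
# def get_model():
#     os.makedirs(local_dir, exist_ok=True)
#     for f in api.list_repo_files(repo_id=repo_id, repo_type="model"):
#         hf_hub_download(repo_id=repo_id, filename=f, local_dir=local_dir)
#     return tf.keras.models.load_model(local_dir, custom_objects=custom_objects)
#
# loaded_model = get_model()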
# Helper functions
def load_and_preprocess_image(image):
    img = image.convert("RGB").resize((256, 256))  # Ensure 3 channels and the expected size
    img = np.array(img).astype(np.float32)
    img = (img - 127.5) / 127.5  # Normalize to [-1, 1]
    img = np.expand_dims(img, axis=0)  # Add batch dimension
    return img


def infer_image(model, image):
    preprocessed_img = load_and_preprocess_image(image)
    generated_img = model(preprocessed_img, training=False)
    generated_img = tf.squeeze(generated_img, axis=0)  # Remove batch dimension
    generated_img = (generated_img * 127.5 + 127.5).numpy()  # De-normalize to [0, 255]
    generated_img = np.clip(generated_img, 0, 255).astype(np.uint8)  # Guard against overshoot before casting
    return generated_img


def load_image_from_url(url):
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # Surface HTTP errors instead of failing later in PIL
    img = Image.open(BytesIO(response.content))
    return img
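# Example standalone use of the helpers (hypothetical local file, not part of the app):
#
#   img = Image.open("test.jpg")                 # any RGB photo
#   stylized = infer_image(loaded_model, img)    # (256, 256, 3) uint8 array
#   Image.fromarray(stylized).save("stylized.jpg")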
# Custom CSS
combined_css = """
.main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
.block-container { padding: 1rem 2rem; background-color: #333; border-radius: 10px; box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.5); }
.stButton>button, .stDownloadButton>button { background: linear-gradient(135deg, #ff7e5f, #feb47b); color: white; border: none; padding: 10px 24px; text-align: center; text-decoration: none; display: inline-block; font-size: 16px; margin: 4px 2px; cursor: pointer; border-radius: 5px; }
.stSpinner { color: #4CAF50; }
.title {
    font-size: 3rem;
    font-weight: bold;
    display: flex;
    align-items: center;
    justify-content: center;
}
.colorful-text {
    background: -webkit-linear-gradient(135deg, #ff7e5f, #feb47b);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}
.black-white-text {
    color: black;
}
.small-input .stTextInput>div>input {
    height: 2rem;
    font-size: 0.9rem;
}
.small-file-uploader .stFileUploader>div>div {
    height: 2rem;
    font-size: 0.9rem;
}
.custom-text {
    font-size: 1.2rem;
    color: #feb47b;
    text-align: center;
    margin-top: -20px;
    margin-bottom: 20px;
}
"""
# Streamlit application
st.set_page_config(layout="wide")
st.markdown(f"<style>{combined_css}</style>", unsafe_allow_html=True)

st.markdown('<div class="title"><span class="colorful-text">Photo</span> <span class="black-white-text">to Van Gogh</span></div>', unsafe_allow_html=True)
st.markdown('<div class="custom-text">Convert photos to Van Gogh style using AI</div>', unsafe_allow_html=True)

# Streamlit UI
uploaded_file = st.file_uploader("Choose an image...", type="jpg")
image_url = st.text_input("Or enter an image URL:")
image = None
if uploaded_file is not None:
    image = Image.open(uploaded_file)
elif image_url:
    try:
        image = load_image_from_url(image_url)
    except Exception as e:
        st.error(f"Failed to load image from URL: {e}")
if image is not None:
    if st.button("Run Inference"):
        # Perform inference
        with st.spinner('Processing...'):
            generated_image = infer_image(loaded_model, image)

        # Display the original and generated images side by side
        st.markdown("### Result")
        col1, col2 = st.columns(2)
        with col1:
            st.image(image, caption='Original Image', use_column_width=True)
        with col2:
            st.image(generated_image, caption='Generated Image', use_column_width=True)

        # Provide a download button for the generated image
        img_byte_arr = BytesIO()
        Image.fromarray(generated_image).save(img_byte_arr, format='JPEG')
        img_byte_arr = img_byte_arr.getvalue()
        st.download_button(
            label="Download Generated Image",
            data=img_byte_arr,
            file_name="generated_image.jpg",
            mime="image/jpeg"
        )

        st.success("Image processed successfully!")