# Streamlit app: GAN-based image colorization (grayscale -> color)
import streamlit as st
from huggingface_hub import hf_hub_download
import torch
from PIL import Image
from torchvision import transforms
from torchvision.models import resnet34
from skimage.color import rgb2lab, lab2rgb
import numpy as np
import requests
from io import BytesIO
from fastai.vision.learner import create_body
from fastai.vision.models.unet import DynamicUnet
# Download the pretrained generator weights from the Hugging Face Hub
repo_id = "Hammad712/GAN-Colorization-Model"
model_filename = "generator.pt"
model_path = hf_hub_download(repo_id=repo_id, filename=model_filename)
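# Note: hf_hub_download caches the file locally (in the Hugging Face cache
# directory) and returns the path to the cached copy, so the weights are not
# re-downloaded on every Streamlit rerun.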
# Define the generator model (same architecture as used during training)
def build_generator(n_input=1, n_output=2, size=256):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    backbone = create_body(resnet34(), pretrained=True, n_in=n_input, cut=-2)
    G_net = DynamicUnet(backbone, n_output, (size, size)).to(device)
    return G_net
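# The generator is a fastai DynamicUnet over a ResNet-34 encoder: it takes the single
# L (lightness) channel as input and predicts the two ab (chrominance) channels at the
# same 256x256 resolution used by the preprocessing below. The backbone's initial
# weights do not matter here, because load_state_dict below overwrites every parameter
# with the downloaded checkpoint.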
# Initialize the generator and load the pretrained weights
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
G_net = build_generator(n_input=1, n_output=2, size=256)
G_net.load_state_dict(torch.load(model_path, map_location=device))
G_net.eval()
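# Illustrative sanity check (commented out so it adds no work at startup): a
# 1x1x256x256 L tensor should map to a 1x2x256x256 ab tensor.
#   with torch.no_grad():
#       assert G_net(torch.zeros(1, 1, 256, 256, device=device)).shape == (1, 2, 256, 256)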
# Preprocessing: resize, convert to L*a*b*, and extract the normalized L channel
def preprocess_image(img):
    img = img.convert("RGB")
    # Use torchvision's InterpolationMode (Image.BICUBIC was removed in Pillow 10)
    img = transforms.Resize((256, 256), interpolation=transforms.InterpolationMode.BICUBIC)(img)
    img = np.array(img)
    img_to_lab = rgb2lab(img).astype("float32")
    img_to_lab = transforms.ToTensor()(img_to_lab)
    L = img_to_lab[[0], ...] / 50. - 1.  # L channel: [0, 100] -> [-1, 1]
    return L.unsqueeze(0).to(device)
# Inference: predict the ab channels and convert L*a*b* back to RGB
def colorize_image(img, model):
    L = preprocess_image(img)
    with torch.no_grad():
        ab = model(L)
    L = (L + 1.) * 50.   # undo the L normalization: [-1, 1] -> [0, 100]
    ab = ab * 110.       # scale predicted ab back to the a*b* range (about [-110, 110])
    Lab = torch.cat([L, ab], dim=1).permute(0, 2, 3, 1).cpu().numpy()
    rgb_imgs = []
    for lab_img in Lab:
        rgb_imgs.append(lab2rgb(lab_img))
    return np.stack(rgb_imgs, axis=0)
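# Standalone usage sketch (outside the Streamlit UI); the file names are placeholders,
# not part of the app:
#   out = colorize_image(Image.open("input.jpg"), G_net)[0]   # float RGB in [0, 1]
#   Image.fromarray((out * 255).astype(np.uint8)).save("colorized.jpg")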
# Custom CSS helper
def set_css(style):
    st.markdown(f"<style>{style}</style>", unsafe_allow_html=True)
# Combined dark mode styles
combined_css = """
.main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
.block-container { padding: 1rem 2rem; background-color: #333; border-radius: 10px; box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.5); }
.stButton>button, .stDownloadButton>button { background: linear-gradient(135deg, #ff7e5f, #feb47b); color: white; border: none; padding: 10px 24px; text-align: center; text-decoration: none; display: inline-block; font-size: 16px; margin: 4px 2px; cursor: pointer; border-radius: 5px; }
.stSpinner { color: #4CAF50; }
.title {
    font-size: 3rem;
    font-weight: bold;
    display: flex; align-items: center;
    justify-content: center;
}
.colorful-text {
    background: -webkit-linear-gradient(135deg, #ff7e5f, #feb47b);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}
.black-white-text {
    color: black;
}
.small-input .stTextInput>div>input {
    height: 2rem;
    font-size: 0.9rem;
}
.small-file-uploader .stFileUploader>div>div {
    height: 2rem;
    font-size: 0.9rem;
}
.custom-text {
    font-size: 1.2rem;
    color: #feb47b;
    text-align: center;
    margin-top: -20px;
    margin-bottom: 20px;
}
"""
# Streamlit application
st.set_page_config(layout="wide")
set_css(combined_css)
st.markdown('<div class="title"><span class="black-white-text">Image</span> <span class="colorful-text">Colorization</span></div>', unsafe_allow_html=True)
st.markdown('<div class="custom-text">Convert black and white images to color using AI</div>', unsafe_allow_html=True)
# Input: image upload or URL
with st.expander("Input Options", expanded=True):
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png", "webp"], key="upload_file", help="Upload an image file to convert")
    url_input = st.text_input("Or enter an image URL", key="url_input", help="Enter the URL of an image to convert")
# Run inference when the button is clicked
if st.button("Colorize"):
    img = None
    if uploaded_file is not None:
        img = Image.open(uploaded_file)
    elif url_input:
        try:
            response = requests.get(url_input, timeout=10)
            response.raise_for_status()
            img = Image.open(BytesIO(response.content))
        except Exception as e:
            st.error(f"Error fetching the image from URL: {e}")
    if img is not None:
        with st.spinner('Processing...'):
            try:
                colorized_images = colorize_image(img, G_net)
                colorized_image = colorized_images[0]
                # Display the original and colorized images side by side
                st.markdown("### Result")
                col1, col2 = st.columns(2)
                with col1:
                    st.image(img, caption='Original Image', use_column_width=True)
                with col2:
                    st.image(colorized_image, caption='Colorized Image', use_column_width=True)
                # Offer the colorized image as a JPEG download
                img_byte_arr = BytesIO()
                Image.fromarray((colorized_image * 255).astype(np.uint8)).save(img_byte_arr, format='JPEG')
                img_byte_arr = img_byte_arr.getvalue()
                st.download_button(
                    label="Download Colorized Image",
                    data=img_byte_arr,
                    file_name="colorized_image.jpg",
                    mime="image/jpeg"
                )
                st.success("Image processed successfully!")
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.error("Please upload an image file or provide a valid URL.")