import cv2
import gradio as gr
import os
from PIL import Image
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
import torch.nn.functional as F
import gdown
import warnings

warnings.filterwarnings("ignore")

# Pull in the IS-Net sources (models, data_loader_cache) from the official DIS repo.
if not os.path.exists("DIS"):
    os.system("git clone https://github.com/xuebinqin/DIS")
if not os.path.exists("IS-Net"):
    os.system("mv DIS/IS-Net/* .")

# project imports
from data_loader_cache import normalize, im_reader, im_preprocess
from models import *

# Helpers
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Official weights: isnet.pth is expected to already be present in the working
# directory (e.g. fetched manually or with gdown); it is only moved into saved_models/.
if not os.path.exists("saved_models"):
    os.mkdir("saved_models")
if not os.path.exists("saved_models/isnet.pth"):
    os.system("mv isnet.pth saved_models/")


class GOSNormalize(object):
    """Normalize an image with the given mean and standard deviation."""

    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std

    def __call__(self, image):
        image = normalize(image, self.mean, self.std)
        return image


transform = transforms.Compose([GOSNormalize([0.5, 0.5, 0.5], [1.0, 1.0, 1.0])])


def load_image(im_path, hypar):
    im = im_reader(im_path)
    im, im_shp = im_preprocess(im, hypar["cache_size"])
    im = torch.divide(im, 255.0)
    shape = torch.from_numpy(np.array(im_shp))
    return transform(im).unsqueeze(0), shape.unsqueeze(0)  # make a batch of image, shape


def build_model(hypar, device):
    net = hypar["model"]

    # Convert to half precision if requested, keeping BatchNorm layers in float32.
    if hypar["model_digit"] == "half":
        net.half()
        for layer in net.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.float()

    net.to(device)

    if hypar["restore_model"] != "":
        net.load_state_dict(
            torch.load(hypar["model_path"] + "/" + hypar["restore_model"], map_location=device)
        )
        net.to(device)
    net.eval()
    return net


def predict(net, inputs_val, shapes_val, hypar, device):
    """Given a preprocessed image batch, return the predicted mask as a uint8 array."""
    net.eval()

    if hypar["model_digit"] == "full":
        inputs_val = inputs_val.type(torch.FloatTensor)
    else:
        inputs_val = inputs_val.type(torch.HalfTensor)

    inputs_val_v = Variable(inputs_val, requires_grad=False).to(device)
    ds_val = net(inputs_val_v)[0]
    pred_val = ds_val[0][0, :, :, :]

    # Resize the prediction back to the original image size.
    target_size = (int(shapes_val[0][0]), int(shapes_val[0][1]))
    pred_val = torch.squeeze(
        F.interpolate(torch.unsqueeze(pred_val, 0), target_size, mode='bilinear')
    )

    # Min-max normalize the mask to [0, 1].
    ma = torch.max(pred_val)
    mi = torch.min(pred_val)
    pred_val = (pred_val - mi) / (ma - mi)

    if device == 'cuda':
        torch.cuda.empty_cache()
    return (pred_val.detach().cpu().numpy() * 255).astype(np.uint8)


# Set Parameters
hypar = {}
hypar["model_path"] = "./saved_models"  # directory holding the trained weights
hypar["restore_model"] = "isnet.pth"    # weights file to load
hypar["interm_sup"] = False             # intermediate feature supervision (training only)
hypar["model_digit"] = "full"           # "full" = float32, "half" = float16 inference
hypar["seed"] = 0
hypar["cache_size"] = [1024, 1024]      # inputs are resized to this resolution for the model
hypar["input_size"] = [1024, 1024]
hypar["crop_size"] = [1024, 1024]
hypar["model"] = ISNetDIS()

# Build Model
net = build_model(hypar, device)


def inference(image):
    image_tensor, orig_size = load_image(image, hypar)
    mask = predict(net, image_tensor, orig_size, hypar, device)

    pil_mask = Image.fromarray(mask).convert('L')
    im_rgb = Image.open(image).convert("RGB")

    im_rgba = im_rgb.copy()
    im_rgba.putalpha(pil_mask)

    return [im_rgba, pil_mask]
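
# Optional quick check (a sketch, not part of the original app): the same pipeline can be
# exercised without the Gradio UI. "example.jpg" below is a placeholder path and must be
# replaced with a real image file before uncommenting.
#
#   rgba, mask = inference("example.jpg")
#   rgba.save("example_rgba.png")   # original image with the predicted alpha channel
#   mask.save("example_mask.png")   # grayscale segmentation mask
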
# Translation texts
translations = {
    "pl": {
        "title": "Zaawansowane Segmentowanie Obrazów",
        "description": """
**Zaawansowane Segmentowanie Obrazów** to zaawansowane narzędzie oparte na sztucznej inteligencji, zaprojektowane do precyzyjnego segmentowania obrazów. Aplikacja ta wykorzystuje najnowsze technologie głębokiego uczenia, aby generować dokładne maski dla różnych typów obrazów. Stworzona przez ekspertów, oferuje użytkownikom intuicyjny interfejs do przetwarzania obrazów. Niezależnie od tego, czy jest używana do celów zawodowych, czy do projektów osobistych, to narzędzie zapewnia najwyższą jakość i niezawodność w zadaniach segmentacji obrazów.

**Technologie**:
- Model: ISNetDIS
- Stworzony przez: Rafał Dembski
- Technologie: PyTorch, Gradio, OpenCV
""",
        "article": ""
    },
    "en": {
        "title": "Advanced Image Segmentation",
        "description": """
**Advanced Image Segmentation** is an advanced AI-based tool designed for precise image segmentation. This application utilizes the latest deep learning technologies to generate accurate masks for different types of images. Created by experts, it offers users an intuitive interface for image processing. Whether used for professional purposes or personal projects, this tool ensures the highest quality and reliability in image segmentation tasks.

**Technologies**:
- Model: ISNetDIS
- Created by: Rafał Dembski
- Technologies: PyTorch, Gradio, OpenCV
""",
        "article": ""
    },
    "de": {
        "title": "Fortgeschrittene Bildsegmentierung",
        "description": """
**Fortgeschrittene Bildsegmentierung** ist ein fortschrittliches, auf künstlicher Intelligenz basierendes Werkzeug, das für die präzise Bildsegmentierung entwickelt wurde. Diese Anwendung nutzt die neuesten Technologien des Deep Learnings, um genaue Masken für verschiedene Bildtypen zu erzeugen. Von Experten erstellt, bietet es den Benutzern eine intuitive Benutzeroberfläche für die Bildverarbeitung. Ob für berufliche Zwecke oder persönliche Projekte, dieses Werkzeug gewährleistet höchste Qualität und Zuverlässigkeit bei der Bildsegmentierung.

**Technologien**:
- Modell: ISNetDIS
- Erstellt von: Rafał Dembski
- Technologien: PyTorch, Gradio, OpenCV
""",
        "article": ""
    }
}

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""


def change_language(lang):
    return translations[lang]['title'], translations[lang]['description'], translations[lang]['article']


with gr.Blocks(theme=gr.themes.Monochrome(), css=css) as demo:
    language = gr.State("en")

    with gr.Row():
        language_selector = gr.Dropdown(
            choices=["pl", "en", "de"],
            value="en",
            label="Wybierz język / Select Language / Sprache auswählen",
            show_label=True,
        )

    with gr.Column(elem_id="col-container"):
        gr.Image("logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
        title = gr.Markdown(translations["en"]["title"])
        description = gr.Markdown(translations["en"]["description"])
        article = gr.Markdown(translations["en"]["article"])

        inputs = gr.Image(type='filepath', label="Wybierz obraz")
        outputs = [gr.Image(label="Wynik (z przezroczystością)"), gr.Image(label="Maska")]
        run_button = gr.Button("Segmentuj", scale=0)

        run_button.click(fn=inference, inputs=inputs, outputs=outputs)

    language_selector.change(
        fn=change_language,
        inputs=language_selector,
        outputs=[title, description, article],
        api_name=False,
    )

demo.launch()