# Hugging Face Space app.
# (The lines "Spaces:" / "Runtime error" here were page-scrape residue from the
# HF Spaces viewer, converted to this comment so the file parses as Python.)
# Standard library
import csv
import gc
import logging
import multiprocessing
import os
import random
from datetime import datetime
from pathlib import Path

# Third-party
import gradio as gr
import PIL
import requests
import torch
from PIL import Image
from psutil import virtual_memory
from transformers import pipeline
from deep_translator import DeeplTranslator, GoogleTranslator
from translatepy import Translator, Language

ts = Translator()

# ruDALL-E stack
from rudalle import get_rudalle_model, get_tokenizer, get_vae, get_realesrgan
from rudalle.pipelines import generate_images, show, super_resolution, cherry_pick_by_ruclip
from rudalle.utils import seed_everything
#from ruclip import load as get_ruclip
import ruclip
# --- Model setup ----------------------------------------------------------
# Prefer GPU; fall back to CPU only when CUDA is unavailable.
# NOTE(review): the fp16 weights are intended for GPU — CPU fallback keeps the
# script importable on non-GPU hosts but inference speed/compatibility there
# is unverified.
device = "cuda" if torch.cuda.is_available() else "cpu"
print('Using device:', device)

# ruDALL-E "Malevich" generator, its tokenizer, and the VAE image decoder.
model = get_rudalle_model('Malevich', pretrained=True, fp16=True, device=device)
tokenizer = get_tokenizer()
vae = get_vae(dwt=True).to(device)

# Super-resolution upscaler and ruCLIP re-ranker.  Loaded here so they are
# available for cherry-picking / upscaling; the Gradio handler below does not
# call them directly.
realesrgan = get_realesrgan('x2', device=device)
clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
clip_predictor = ruclip.Predictor(clip, processor, device, bs=8)

# Example prompt ("a rainbow over a night city"), kept from the original demo.
text = 'радуга на фоне ночного города'
def model(text, tokenizer=tokenizer, dalle=model, vae=vae, top_k=2048, images_num=1, bs=8, top_p=0.995):
    """Generate one image for *text* with ruDALL-E and return it as a PIL image.

    Bug fixes vs. the original:
    * The default was ``dalle=dalle``, which raised ``NameError`` at function
      definition time (no name ``dalle`` existed) — the Space's runtime error.
      ``dalle=model`` works because defaults are evaluated *before* this
      ``def`` rebinds the global name ``model``, so it captures the loaded
      ruDALL-E model.
    * ``generate_images`` returns ``(pil_images, scores)``, so the old
      ``pil_images[0]`` yielded the image *list*, not a PIL image as the
      ``gr.outputs.Image(type="pil")`` output expects.
    * The ``bs`` parameter was ignored (``bs=8`` was hard-coded); it is now
      passed through (default unchanged).
    """
    pil_images, _scores = generate_images(
        text, tokenizer, dalle, vae,
        top_k=top_k, images_num=images_num, bs=bs, top_p=top_p,
    )
    # Single-image UI: return just the first generated image.
    return pil_images[0]
# --- Gradio app ------------------------------------------------------------
# Fixed seed so generations are reproducible across restarts.
seed_everything(42)

# Minimal text-prompt -> generated-image demo around the `model` handler above.
# (Unused module-level locals `pil_images`/`scores`/`top_k`/`top_p`/`images_num`
# from the original were removed; sampling parameters live in the handler's
# defaults, and `.launch()` blocks, so nothing after it could read them.)
iface = gr.Interface(
    fn=model,
    inputs=[gr.inputs.Textbox(label="Text prompt")],
    outputs=[gr.outputs.Image(type="pil", label="Generated Image")],
).launch()
#TEST--------
# Earlier hot-dog classifier demo, kept disabled for reference.
#pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
#def predict(image):
#    predictions = pipeline(image)
#    return {p["label"]: p["score"] for p in predictions}
#gr.Interface(
#    predict,
#    inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
#    outputs=gr.outputs.Label(num_top_classes=2),
#    title="Hot Dog? Or Not?",
#).launch()