Spaces:
Runtime error
Runtime error
File size: 2,083 Bytes
1dc335e ed907c0 a791790 2986a2f 0424af2 8fa6d79 a791790 2986a2f 06ee334 9775aa2 466917c da85889 2fda7e8 2986a2f 034dccf 2986a2f 0424af2 d25ec95 c0026e7 2fda7e8 d25ec95 0424af2 d25ec95 74d37ca e2ce6b6 74d37ca |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 |
import gradio as gr
from transformers import pipeline
import multiprocessing
import torch
import os
from psutil import virtual_memory
from pathlib import Path
import random
from datetime import datetime
import PIL
from PIL import Image
import os
import gc
from translatepy import Translator, Language
ts = Translator()
from deep_translator import DeeplTranslator, GoogleTranslator
from pathlib import Path
import csv
import logging
import requests
from rudalle.pipelines import generate_images, show, super_resolution, cherry_pick_by_ruclip
from rudalle import get_rudalle_model, get_tokenizer, get_vae, get_realesrgan
from rudalle.utils import seed_everything
#from ruclip import load as get_ruclip
import ruclip
# prepare models:
#device = 'cuda'
# Pick GPU when available; ruDALL-E is extremely slow on CPU but still runs.
device = "cuda" if torch.cuda.is_available() else "cpu"
#device = "cuda"
print('Using device:', device)
# Malevich: the 1.3B-parameter ruDALL-E checkpoint; fp16 halves GPU memory use.
dalle = get_rudalle_model('Malevich', pretrained=True, fp16=True, device=device)
tokenizer = get_tokenizer()
# dwt=True selects the discrete-wavelet VAE decoder variant.
vae = get_vae(dwt=True).to(device)
# pipeline utils:
#TEST--------
# 2x Real-ESRGAN upscaler — loaded here but not used by the Gradio handler below.
realesrgan = get_realesrgan('x2', device=device)
# ruCLIP model + predictor, used for cherry-picking generations by CLIP score.
# NOTE(review): clip_predictor is loaded but never referenced later in this file.
clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
clip_predictor = ruclip.Predictor(clip, processor, device, bs=8)
# Leftover example prompt ("a rainbow over a night city"); only used by the
# commented-out generation loop below, not by the Gradio app.
text = 'радуга на фоне ночного города'
#seed_everything(42)
#pil_images = []
#scores = []
#for top_k, top_p, images_num in [
#    (2048, 0.995, 6),
#]:
#    _pil_images, _scores = generate_images(text, tokenizer, model, vae, top_k=top_k, images_num=images_num, #bs=8, top_p=top_p)
#    pil_images += _pil_images
#    scores += _scores
def model(text, tokenizer=tokenizer, dalle=dalle, vae=vae, top_k=2048, images_num=1, bs=8, top_p=0.995):
    """Generate one image from a (Russian) text prompt with ruDALL-E.

    Args:
        text: the text prompt to render.
        tokenizer, dalle, vae: default to the module-level models loaded above.
        top_k, top_p: sampling truncation parameters for generation.
        images_num: how many candidates to sample (only the first is returned).
        bs: generation batch size.

    Returns:
        The first generated PIL image.
    """
    # generate_images returns (pil_images, scores) — see the commented example
    # loop above. BUGFIX: pass bs=bs instead of the hardcoded bs=8 so the
    # caller-supplied batch size is actually honored.
    pil_images, _scores = generate_images(
        text, tokenizer, dalle, vae,
        top_k=top_k, images_num=images_num, bs=bs, top_p=top_p,
    )
    return pil_images[0]
# Fix all RNG seeds so generations are reproducible across runs.
seed_everything(42)
# NOTE(review): this list is never used afterwards — leftover from the
# commented-out experiment above.
pil_images = []
# Build and launch the Gradio UI: one text prompt in, one PIL image out.
# NOTE(review): gr.inputs / gr.outputs is the legacy pre-3.x Gradio API —
# presumably the Space pins an old gradio version; confirm before upgrading.
# .launch(debug=True) blocks here and serves the app.
iface = gr.Interface(fn=model, title="V1",
inputs=[gr.inputs.Textbox(label="Text prompt")],
outputs=[gr.outputs.Image(type="pil", label="Generated Image")]).launch(debug=True)
|