"""Gradio demo: generate an image from a (Russian) text prompt with ruDALL-E Malevich."""
import gradio as gr
from transformers import pipeline  # kept from original; not used below
import multiprocessing
import torch
import os
from psutil import virtual_memory
from pathlib import Path
import random
from datetime import datetime
import PIL
from PIL import Image
import gc
import csv
import logging
import requests

from translatepy import Translator, Language
from deep_translator import DeeplTranslator, GoogleTranslator
from rudalle.pipelines import generate_images, show, super_resolution, cherry_pick_by_ruclip
from rudalle import get_rudalle_model, get_tokenizer, get_vae, get_realesrgan
from rudalle.utils import seed_everything
import ruclip

ts = Translator()

# Prefer GPU when available; ruDALL-E with fp16 is impractically slow on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
print('Using device:', device)

# Core generation models (weights are downloaded on first run).
dalle = get_rudalle_model('Malevich', pretrained=True, fp16=True, device=device)
tokenizer = get_tokenizer()
vae = get_vae(dwt=True).to(device)

# Pipeline utilities: 2x super-resolution and ruCLIP re-ranking.
# NOTE(review): loaded but not used by `model` below — kept for parity with the original.
realesrgan = get_realesrgan('x2', device=device)
clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
clip_predictor = ruclip.Predictor(clip, processor, device, bs=8)


def model(text, tokenizer=tokenizer, dalle=dalle, vae=vae, top_k=2048,
          images_num=1, bs=8, top_p=0.995):
    """Generate `images_num` candidate images for `text`; return the first as a PIL image.

    Args:
        text: text prompt (the Malevich checkpoint expects Russian).
        tokenizer, dalle, vae: pre-loaded ruDALL-E components (module-level defaults).
        top_k, top_p: sampling parameters for generation.
        images_num: number of candidate images to generate.
        bs: generation batch size.

    Returns:
        PIL.Image.Image: the first generated image.
    """
    # generate_images returns (pil_images, scores); the original discarded the
    # scores via double indexing. Bug fix: pass `bs` through instead of the
    # hard-coded 8 the original used, so the parameter actually takes effect.
    pil_images, _scores = generate_images(
        text, tokenizer, dalle, vae,
        top_k=top_k, images_num=images_num, bs=bs, top_p=top_p,
    )
    return pil_images[0]


seed_everything(42)

iface = gr.Interface(
    fn=model,
    title="V1",
    inputs=[gr.inputs.Textbox(label="Text prompt")],
    outputs=[gr.outputs.Image(type="pil", label="Generated Image")],
).launch(debug=True)