File size: 1,937 Bytes
0dfbf6a
ed907c0
a791790
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2986a2f
 
 
0424af2
a791790
2986a2f
06ee334
 
2986a2f
 
 
 
0424af2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a791790
0dfbf6a
ed907c0
0dfbf6a
ed907c0
 
 
0dfbf6a
ed907c0
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import gradio as gr
from transformers import pipeline
import multiprocessing
import torch
import os
from psutil import virtual_memory
from pathlib import Path
import random
from datetime import datetime
import PIL
from PIL import Image
import os
import gc
from translatepy import Translator, Language
ts = Translator()
from deep_translator import DeeplTranslator, GoogleTranslator
from pathlib import Path
import csv
import logging
import requests
from rudalle.pipelines import generate_images, show, super_resolution, cherry_pick_by_ruclip
from rudalle import get_rudalle_model, get_tokenizer, get_vae, get_realesrgan
from rudalle.utils import seed_everything
#from ruclip import load as get_ruclip

# Prepare the ruDALL-E generation models.
# Fall back to CPU when no GPU is present. Half precision (fp16) is only
# reliable on CUDA, so it is enabled exactly when a GPU is available instead
# of being hard-coded to True (which would break or degrade CPU inference).
device = "cuda" if torch.cuda.is_available() else "cpu"
# 'Malevich' is the ruDALL-E text-to-image checkpoint used below.
model = get_rudalle_model('Malevich', pretrained=True, fp16=torch.cuda.is_available(), device=device)
tokenizer = get_tokenizer()
# dwt=True selects the discrete-wavelet decoder variant of the VAE.
vae = get_vae(dwt=True).to(device)

# pipeline utils:
#TEST--------
# NOTE(review): two NameError bugs fixed here. `ruclip` was referenced but its
# import was commented out at the top of the file, and `generate_images` was
# called with an undefined name `dalle` — the model prepared above is bound to
# `model`.
import ruclip

realesrgan = get_realesrgan('x2', device=device)
clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
clip_predictor = ruclip.Predictor(clip, processor, device, bs=8)
text = 'радуга на фоне ночного города'  # prompt: "a rainbow over the night city"

seed_everything(42)  # fixed seed so this smoke test is reproducible
pil_images = []
scores = []
# Single sampling configuration; add tuples here to sweep other settings.
for top_k, top_p, images_num in [
    (2048, 0.995, 24),
]:
    _pil_images, _scores = generate_images(text, tokenizer, model, vae, top_k=top_k, images_num=images_num, bs=8, top_p=top_p)
    pil_images += _pil_images
    scores += _scores

show(pil_images, 6)  # render the generated images, 6 per row
#TEST--------


# Hot-dog demo classifier. Bound to `classifier` rather than `pipeline` so the
# imported `transformers.pipeline` factory is not shadowed (the original
# `pipeline = pipeline(...)` made any later pipeline creation impossible).
classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict(image):
    """Classify *image* (a file path) and return a {label: score} mapping.

    The mapping contains one entry per prediction returned by the
    image-classification pipeline, suitable for a gradio Label output.
    """
    predictions = classifier(image)
    return {p["label"]: p["score"] for p in predictions}

# Build the demo UI as a named object, then launch it.
demo = gr.Interface(
    predict,
    inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
    outputs=gr.outputs.Label(num_top_classes=2),
    title="Hot Dog? Or Not?",
)
demo.launch()