cosmicdream committed
Commit: d25ec95
Parent: 5a71d15

Update app.py

Files changed (1)
  1. app.py +33 -17
app.py CHANGED
@@ -41,29 +41,45 @@ clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
 clip_predictor = ruclip.Predictor(clip, processor, device, bs=8)
 text = 'радуга на фоне ночного города'

+#seed_everything(42)
+#pil_images = []
+#scores = []
+#for top_k, top_p, images_num in [
+#    (2048, 0.995, 6),
+#]:
+#    _pil_images, _scores = generate_images(text, tokenizer, model, vae, top_k=top_k, images_num=images_num, #bs=8, top_p=top_p)
+#    pil_images += _pil_images
+#    scores += _scores
+
+def model(text, tokenizer=tokenizer, dalle=dalle, vae=vae, top_k=2048, images_num=1, bs=8, top_p=0.995):
+    # _pil_images, _scores = generate_images(text, tokenizer, dalle, vae, top_k=top_k, images_num=images_num, bs=8, top_p=top_p)
+    pil_images = generate_images(text, tokenizer, dalle, vae, top_k=top_k, images_num=images_num, bs=8, top_p=top_p)
+    pil_images = pil_images[0]
+    return pil_images
+
 seed_everything(42)
 pil_images = []
 scores = []
-for top_k, top_p, images_num in [
-    (2048, 0.995, 6),
-]:
-    _pil_images, _scores = generate_images(text, tokenizer, model, vae, top_k=top_k, images_num=images_num, bs=8, top_p=top_p)
-    pil_images += _pil_images
-    scores += _scores
+top_k = 2048
+top_p = 0.995
+images_num = 1
+
+iface = gr.Interface(fn=model,
+                     inputs=[gr.inputs.Textbox(label="Text prompt"),
+                     outputs=[gr.outputs.Image(type="pil", label="Generated Image")]).launch()

-#show(pil_images, 6)
 #TEST--------


-pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
+#pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

-def predict(image):
-    predictions = pipeline(image)
-    return {p["label"]: p["score"] for p in predictions}
+#def predict(image):
+#    predictions = pipeline(image)
+#    return {p["label"]: p["score"] for p in predictions}

-gr.Interface(
-    predict,
-    inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
-    outputs=gr.outputs.Label(num_top_classes=2),
-    title="Hot Dog? Or Not?",
-).launch()
+#gr.Interface(
+#    predict,
+#    inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
+#    outputs=gr.outputs.Label(num_top_classes=2),
+#    title="Hot Dog? Or Not?",
+#).launch()
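
For reference, a minimal sketch of how the pieces added in this commit are meant to fit together. This is an assumption-laden sketch, not the committed code: it presumes generate_images is rudalle.pipelines.generate_images and returns a (pil_images, scores) pair (as the removed loop unpacks it), that tokenizer, dalle and vae are loaded earlier in app.py outside this hunk, and it closes the inputs list that the committed gr.Interface call leaves open as rendered above.

import gradio as gr
from rudalle.pipelines import generate_images  # assumed source of generate_images

# Assumes tokenizer, dalle and vae are already created earlier in app.py (not shown in this hunk).
def model(text, top_k=2048, top_p=0.995, images_num=1, bs=8):
    # generate_images returns (pil_images, scores); return the first (and only) PIL image.
    pil_images, _scores = generate_images(text, tokenizer, dalle, vae,
                                          top_k=top_k, images_num=images_num, bs=bs, top_p=top_p)
    return pil_images[0]

iface = gr.Interface(fn=model,
                     inputs=[gr.inputs.Textbox(label="Text prompt")],
                     outputs=[gr.outputs.Image(type="pil", label="Generated Image")])
iface.launch()

The gr.inputs / gr.outputs namespaces follow the Gradio 2.x-era API already used in this file; newer Gradio releases replace them with gr.Textbox and gr.Image.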