Spaces:
Runtime error
Runtime error
dreamdrop-art
committed on
Commit
•
c95bc41
1
Parent(s):
8a087d4
Update app.py
Browse files
app.py
CHANGED
@@ -1,9 +1,11 @@
|
|
1 |
import gradio as gr
|
2 |
import requests
|
3 |
import time
|
|
|
4 |
import json
|
5 |
import base64
|
6 |
import os
|
|
|
7 |
from io import BytesIO
|
8 |
import io
|
9 |
import html
|
@@ -433,6 +435,29 @@ with gr.Blocks(css=css) as demo:
|
|
433 |
|
434 |
hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
|
435 |
hf_seed], outputs=hf_image_output, concurrency_limit=64)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
436 |
with gr.Tab("BLIP"):
|
437 |
with gr.Tab("Base"):
|
438 |
gr.load("models/Salesforce/blip-image-captioning-base", title="BLIP-base")
|
@@ -444,7 +469,6 @@ with gr.Blocks(css=css) as demo:
|
|
444 |
# gr.load("models/mattmdjaga/segformer_b2_clothes", title="SegFormer Segmentation")
|
445 |
with gr.Tab("Visual Question Answering"):
|
446 |
gr.load("models/dandelin/vilt-b32-finetuned-vqa", title="ViLT VQA")
|
447 |
-
|
448 |
-
gr.load("models/facebook/detr-resnet-50", title="DETR OD")
|
449 |
|
450 |
demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)
|
|
|
1 |
import gradio as gr
|
2 |
import requests
|
3 |
import time
|
4 |
+
import random
|
5 |
import json
|
6 |
import base64
|
7 |
import os
|
8 |
+
from transformers import pipeline, set_seed
|
9 |
from io import BytesIO
|
10 |
import io
|
11 |
import html
|
|
|
435 |
|
436 |
hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
|
437 |
hf_seed], outputs=hf_image_output, concurrency_limit=64)
|
438 |
+
with gr.Tab("Prompt Generator"):
|
439 |
+
gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
|
440 |
+
with open("ideas.txt", "r") as f:
|
441 |
+
line = f.readlines()
|
442 |
+
def generate(starting_text):
|
443 |
+
seed = random.randint(100, 1000000)
|
444 |
+
set_seed(seed)
|
445 |
+
if starting_text == "":
|
446 |
+
starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
|
447 |
+
starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)
|
448 |
+
response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=4)
|
449 |
+
response_list = []
|
450 |
+
for x in response:
|
451 |
+
resp = x['generated_text'].strip()
|
452 |
+
if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
|
453 |
+
response_list.append(resp+'\n')
|
454 |
+
response_end = "\n".join(response_list)
|
455 |
+
response_end = re.sub('[^ ]+\.[^ ]+','', response_end)
|
456 |
+
response_end = response_end.replace("<", "").replace(">", "")
|
457 |
+
|
458 |
+
if response_end != "":
|
459 |
+
return response_end
|
460 |
+
|
461 |
with gr.Tab("BLIP"):
|
462 |
with gr.Tab("Base"):
|
463 |
gr.load("models/Salesforce/blip-image-captioning-base", title="BLIP-base")
|
|
|
469 |
# gr.load("models/mattmdjaga/segformer_b2_clothes", title="SegFormer Segmentation")
|
470 |
with gr.Tab("Visual Question Answering"):
|
471 |
gr.load("models/dandelin/vilt-b32-finetuned-vqa", title="ViLT VQA")
|
472 |
+
|
|
|
473 |
|
474 |
demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)
|