Spaces: TogetherAI (Runtime error)

TogetherAI committed
Commit · f9e0a72
1 Parent(s): b4d6d38
Update app.py

app.py CHANGED
@@ -1,42 +1,20 @@
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import gradio as gr
-import requests
-from PIL import Image
-from io import BytesIO
-import base64
-
-
-
-def generate_image(model, prompt, seed, negative_prompt, sampler, steps):
-    data = "?model=" + model + "&prompt=" + prompt + "&seed=" + str(seed) + "&negative_prompt=" + negative_prompt + "&sampler=" + sampler + "&steps=" + str(steps)
-    response = requests.post(api_url + data, timeout=400)
-    if response.status_code == 200:
-        img_base64 = response.json()["url"]
-        img_bytes = base64.b64decode(img_base64)
-        img = Image.open(BytesIO(img_bytes))
-        return img
-    else:
-        return None
-
-
-
-
-
-dropdown_sampler = gr.inputs.Dropdown(["k_lms", "k_heun", "k_euler", "k_euler_a", "k_dpm_2", "k_dpm_2_a", "DDIM", "k_dpm_fast", "k_dpm_adaptive", "k_dpmpp_2m", "k_dpmpp_2s_a", "k_dpmpp_sde"], label="Sampler", default="k_dpmpp_2s_a")
-number_steps = gr.inputs.Number(label="Steps", default=50)
-
-
-
-outputs = gr.outputs.Image(label="Generated Image", type="pil")
-
-interface = gr.Interface(
-    fn=generate_image,
-    inputs=inputs,
-    outputs=outputs,
-    title="Image Generator",
-    description="Select options and enter a prompt to generate an image.",
-    examples=[],
-    layout="vertical",
-    theme="ParityError/Interstellar"  # Your specified theme
-)
-
-interface.launch()
+
+tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long")
+model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompts-bart-long", from_tf=True)
+
+def generate(prompt):
+
+    batch = tokenizer(prompt, return_tensors="pt")
+    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
+    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
+    return output[0]
+
+input_component = gr.Textbox(label = "Input a persona, e.g. photographer", value = "photographer")
+output_component = gr.Textbox(label = "Prompt")
+examples = [["photographer"], ["developer"]]
+description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). 📓 Simply enter a persona that you want the prompt to be generated based on. 🧑🏻‍🚀🧑🏻‍🎨🧑🏻‍🔬🧑🏻‍💻🧑🏼‍🏫🧑🏽‍🌾"
+gr.Interface(generate, inputs = input_component, outputs=output_component, examples=examples, title = "👨🏻‍🎤 ChatGPT Prompt Generator 👨🏻‍🎤", description=description).launch()
+
+
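The new inference path can be exercised without launching the Gradio app. Below is a minimal smoke-test sketch, assuming the same checkpoint the commit loads (`from_tf=True` converts TensorFlow weights, so TensorFlow must be installed alongside PyTorch); passing `attention_mask` is our addition for robustness, not part of the committed code:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Same checkpoint the commit loads; from_tf=True converts the TF weights on the fly.
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long")
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompts-bart-long", from_tf=True)

# Encode a persona, exactly as the committed generate() does.
batch = tokenizer("photographer", return_tensors="pt")

# attention_mask is our addition; the committed code passes only input_ids.
generated_ids = model.generate(
    batch["input_ids"],
    attention_mask=batch["attention_mask"],
    max_new_tokens=150,
)

# Decode and print the first (and only) generated prompt.
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
```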
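An aside on the removed client code: it built its query string by plain concatenation, so a prompt containing spaces, `&`, or non-ASCII characters would have corrupted the request (note also that it referenced `api_url` and `inputs` that the removed file never defines). A hedged sketch of URL-safe query building with `urllib.parse.urlencode`; the parameter names mirror the removed code, while `build_request_url` and the example URL are hypothetical, not part of either version:

```python
from urllib.parse import urlencode

def build_request_url(api_url, model, prompt, seed, negative_prompt, sampler, steps):
    # urlencode percent-escapes spaces, '&', and non-ASCII characters,
    # which the removed string concatenation did not.
    params = {
        "model": model,
        "prompt": prompt,
        "seed": seed,
        "negative_prompt": negative_prompt,
        "sampler": sampler,
        "steps": steps,
    }
    return api_url + "?" + urlencode(params)

# Example: "a cat & dog" is encoded as "a+cat+%26+dog" instead of breaking the query.
print(build_request_url("https://example.com/generate", "sd-1.5", "a cat & dog", 42, "", "k_dpmpp_2s_a", 50))
```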