Update app.py

Fill in the model checkpoint, parameterize generate() with a max_new_tokens argument, and add a slider so the user can control the length of the generated prompt.
app.py CHANGED
@@ -2,20 +2,21 @@ import streamlit as st
 import random
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForSeq2SeqLM.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
+model = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
 
-def generate(prompt):
+def generate(prompt, max_new_tokens):
     batch = tokenizer(prompt, return_tensors="pt")
-    generated_ids = model.generate(batch["input_ids"], max_new_tokens=
+    generated_ids = model.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
     output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
     return output[0]
 
 st.title("ChatGPT-BingChat Prompt Generator")
-st.write("This app generates ChatGPT/BingChat & GPT-3 prompts using [this](https://huggingface.co/
+st.write("This app generates ChatGPT/BingChat & GPT-3 prompts using [this](https://huggingface.co/Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum) model trained by [Kaludi](https://huggingface.co/Kaludi/). Enter a role and a prompt will be generated based on it.")
 prompt = st.text_input("Enter a Role, Example: Virtual Assistant", placeholder="Text here", value="")
+max_new_tokens = st.slider("Select Max Tokens in Response", min_value=100, max_value=500, value=150, step=10)
 if st.button("Generate"):
-    output = generate(prompt)
+    output = generate(prompt, max_new_tokens)
     st.write("Generated Prompt:", box=True)
     st.write("<div style='background-color: #2E2E2E; padding: 10px;'>{}</div>".format(output), unsafe_allow_html=True)
     st.write("")
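After this commit, app.py loads a BART-based prompt-generation model at the top of the script and threads the slider value into model.generate. Below is a minimal sketch of the same flow with one assumption layered on top of the diff: the model load is wrapped in @st.cache_resource (a Streamlit API not used in this commit, available in Streamlit 1.18+) so the weights are loaded once per server process instead of on every rerun. The sketch also drops box=True, which is not a documented st.write parameter.

# Minimal sketch of the post-commit app; assumes Streamlit >= 1.18 for st.cache_resource.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

@st.cache_resource
def load_model():
    # from_tf=True converts this checkpoint's TensorFlow weights to PyTorch on load.
    checkpoint = "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint, from_tf=True)
    return tokenizer, model

def generate(prompt, max_new_tokens):
    tokenizer, model = load_model()
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

st.title("ChatGPT-BingChat Prompt Generator")
prompt = st.text_input("Enter a Role, Example: Virtual Assistant", placeholder="Text here")
max_new_tokens = st.slider("Select Max Tokens in Response", min_value=100, max_value=500, value=150, step=10)
if st.button("Generate") and prompt:
    st.write("Generated Prompt:")
    st.write(generate(prompt, max_new_tokens))

The cache matters because Streamlit re-executes the whole script on every widget interaction, so the diff's top-level from_pretrained calls would otherwise reload the weights each time the slider or button is touched.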