Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,43 +1,19 @@
|
|
1 |
-
|
|
|
2 |
|
3 |
-
|
4 |
-
|
5 |
|
6 |
-
|
7 |
|
8 |
-
|
9 |
-
|
|
|
|
|
10 |
|
11 |
-
|
|
|
|
|
|
|
|
|
12 |
|
13 |
-
llm_chain = LLMChain(prompt=prompt,
|
14 |
-
llm=HuggingFaceHub(repo_id="google/flan-t5-xl",
|
15 |
-
model_kwargs={"temperature":0,
|
16 |
-
"max_length":64}))
|
17 |
-
|
18 |
-
|
19 |
-
from langchain.llms import HuggingFacePipeline
|
20 |
-
import torch
|
21 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM, AutoTokenizer, AutoModelForSeq2SeqLM
|
22 |
-
|
23 |
-
model_id = 'Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum'
|
24 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
25 |
-
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, from_tf=True)
|
26 |
-
|
27 |
-
pipeline = pipeline(
|
28 |
-
"text2text-generation",
|
29 |
-
model=model,
|
30 |
-
tokenizer=tokenizer,
|
31 |
-
max_length=128
|
32 |
-
)
|
33 |
-
|
34 |
-
local_llm = HuggingFacePipeline(pipeline=pipeline)
|
35 |
-
|
36 |
-
|
37 |
-
llm_chain = LLMChain(prompt=prompt,
|
38 |
-
llm=local_llm
|
39 |
-
)
|
40 |
-
|
41 |
-
question = "Excel Sheet"
|
42 |
-
|
43 |
-
print(llm_chain.run(question))
|
|
|
1 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
2 |
+
import gradio as gr
|
3 |
|
4 |
+
# Hub repo id, written once so tokenizer and model can never drift apart.
MODEL_ID = "merve/chatgpt-prompts-bart-long"

# Load the BART seq2seq checkpoint and its tokenizer once at startup.
# from_tf=True: the repo ships TensorFlow weights, converted on load.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID, from_tf=True)
|
6 |
|
7 |
+
def generate(prompt):
    """Generate a ChatGPT prompt for *prompt* (a persona, e.g. "photographer").

    Tokenizes the persona, runs the module-level seq2seq model, and returns
    the first decoded sequence as a plain string.
    """
    # Tokenize into PyTorch tensors; a single string input, so no padding here.
    batch = tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask explicitly: calling generate() with input_ids
    # alone makes transformers infer the mask (and warn), and is wrong
    # whenever padding tokens are present.
    generated_ids = model.generate(
        batch["input_ids"],
        attention_mask=batch["attention_mask"],
        max_new_tokens=150,
    )
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]
|
13 |
|
14 |
+
# Gradio UI: one textbox in (a persona), one textbox out (the generated
# prompt), wired to generate(); examples give users one-click starters.
description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). π Simply enter a persona that you want the prompt to be generated based on. π§π»π§π»βππ§π»βπ¨π§π»βπ¬π§π»βπ»π§πΌβπ«π§π½βπΎ"
examples = [["photographer"], ["developer"]]
input_component = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
output_component = gr.Textbox(label="Prompt")
demo = gr.Interface(
    generate,
    inputs=input_component,
    outputs=output_component,
    examples=examples,
    title="π¨π»βπ€ ChatGPT Prompt Generator π¨π»βπ€",
    description=description,
)
demo.launch()
|
19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|