EinfachOlder celestinian committed on
Commit
b558ff5
0 Parent(s):

Duplicate from Celestinian/Prompt-Generator

Browse files

Co-authored-by: Skyler Celestinian-Sterling <Celestinian@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +35 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Prompt Generator
3
+ emoji: ⚡
4
+ colorFrom: red
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 3.27.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ duplicated_from: Celestinian/Prompt-Generator
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2LMHeadModel
import gradio as gr
import torch
import git  # NOTE(review): unused in this file; requirements.txt pins gitpython — confirm before removing

# Prefer the GPU when one is available; generate_text() moves its input
# tensors to this device, so the model must live on it too (see below).
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("Celestinian/PromptGPT")
model = AutoModelForCausalLM.from_pretrained("Celestinian/PromptGPT")
# Bug fix: the model was never moved to `device`. On a CUDA machine,
# generate_text() would feed GPU tensors to a CPU-resident model and
# generate() would raise a device-mismatch error.
model.to(device)
11
def generate_text(prompt, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p):
    """Generate a prompt continuation with PromptGPT and return the decoded text.

    Args:
        prompt: User-supplied seed text; a comma is appended if it has none,
            matching the comma-separated format the model was prompted with.
        max_length: Maximum total token length passed to ``model.generate``.
        do_sample: Whether to sample instead of greedy decoding.
        temperature: Sampling temperature.
        no_repeat_ngram_size: Block repeated n-grams of this size.
        top_k: Top-k sampling cutoff.
        top_p: Nucleus sampling cutoff.

    Returns:
        The decoded generation as a string (also printed to stdout for logs).
    """
    formatted_prompt = "\n" + prompt
    if ',' not in prompt:
        formatted_prompt += ','
    # Tokenize and move the input tensors to the same device as the model.
    # (Kept in a separate name instead of shadowing the `prompt` parameter.)
    inputs = tokenizer(formatted_prompt, return_tensors='pt')
    inputs = {key: value.to(device) for key, value in inputs.items()}
    out = model.generate(**inputs, max_length=max_length, do_sample=do_sample,
                         temperature=temperature,
                         no_repeat_ngram_size=no_repeat_ngram_size,
                         top_k=top_k, top_p=top_p)
    output = tokenizer.decode(out[0])
    # Bug fix: the original did output.replace('\n', '\n') — a no-op that
    # replaced a newline with itself. The evident intent was to turn literal
    # backslash-n escape sequences emitted by the model into real newlines.
    # TODO(review): confirm PromptGPT actually emits literal "\n" sequences.
    clean_output = output.replace('\\n', '\n')
    print(clean_output)
    return clean_output
24
# Gradio UI wiring: one widget per generate_text() parameter, listed in the
# same order as the function signature so positional binding lines up.
prompt_box = gr.inputs.Textbox(lines=5, label="Input Text")
length_slider = gr.inputs.Slider(minimum=10, maximum=100, default=30, label="Max Length")
sample_toggle = gr.inputs.Checkbox(default=True, label="Do Sample")
temp_slider = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.4, label="Temperature")
ngram_slider = gr.inputs.Slider(minimum=1, maximum=10, default=1, label="No Repeat N-Gram Size")
topk_slider = gr.inputs.Slider(minimum=1, maximum=100, default=50, label="Top K")
topp_slider = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.2, label="Top P")

result_box = gr.outputs.Textbox(label="Generated Text")

# Build the demo and start the Gradio server.
demo = gr.Interface(
    generate_text,
    inputs=[prompt_box, length_slider, sample_toggle, temp_slider,
            ngram_slider, topk_slider, topp_slider],
    outputs=result_box,
)
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ transformers==4.28.1
2
+ torch==2.0.0
3
+ torchvision==0.15.1
4
+ torchaudio==2.0.1
5
+ gitpython==3.1.24
6
+ accelerate==0.18.0