merve (HF staff) committed
Commit 6eafad6
1 Parent(s): fc8e319

Upload app.py

Files changed (1)
  1. app.py +71 -0
app.py ADDED
@@ -0,0 +1,71 @@
+import streamlit as st
+
+# AutoModelForCausalLM replaces the deprecated AutoModelWithLMHead
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
+model = AutoModelForCausalLM.from_pretrained("gpt2-large")
+
+
+def infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences):
+    # Sample continuations; max_length counts new tokens on top of the prompt
+    output_sequences = model.generate(
+        input_ids=input_ids,
+        max_length=max_length + (len(input_ids[0]) if input_ids is not None else 0),
+        temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
+        do_sample=True,
+        num_return_sequences=num_return_sequences,
+    )
+    return output_sequences
+
+
+default_value = "See how a modern neural network auto-completes your text 🤗 This site, built by the Hugging Face team, lets you write a whole document directly from your browser, and you can trigger the Transformer anywhere using the Tab key. It's like having a smart machine that completes your thoughts 😀 Get started by typing a custom snippet, check out the repository, or try one of the examples. Have fun!"
+
+# Prompts and UI
+st.title("Write with Transformers 🦄")
+st.write("The almighty king of text generation, GPT-2 comes in four sizes, only three of which have been publicly made available. Feared for its fake-news generation capabilities, it currently stands as the most syntactically coherent model. A direct successor to the original GPT, it reinforces the already established pre-training/fine-tuning killer duo. From the paper: Language Models are Unsupervised Multitask Learners by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever.")
+
+sent = st.text_area("Text", default_value, height=275)
+max_length = st.sidebar.slider("Max Length", min_value=10, max_value=30)
+# Float bounds (and nonzero defaults) so temperature/top-p step in fractions
+# instead of jumping between the integers 0 and 1
+temperature = st.sidebar.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9)
+top_k = st.sidebar.slider("Top-k", min_value=1, max_value=5)
+top_p = st.sidebar.slider("Top-p", min_value=0.0, max_value=1.0, value=0.9)
+num_return_sequences = st.sidebar.number_input("Number of Sequences to be Generated", min_value=1, max_value=5, value=1, step=1)
+
+encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
+if encoded_prompt.size()[-1] == 0:
+    input_ids = None
+else:
+    input_ids = encoded_prompt
+
+output_sequences = infer(input_ids, max_length, temperature, top_k, top_p, num_return_sequences)
+
+generated_sequences = []
+for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
+    print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
+    generated_sequence = generated_sequence.tolist()
+
+    # Decode text
+    text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
+
+    # Prepend the original prompt and drop its decoded copy from the model output
+    total_sequence = (
+        sent + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
+    )
+
+    generated_sequences.append(total_sequence)
+    print(total_sequence)
+
+st.write(generated_sequences)
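
For a quick sanity check of the generation step outside Streamlit, here is a minimal sketch; it assumes transformers and torch are installed, and it substitutes the smaller gpt2 checkpoint for gpt2-large purely to keep the download light (the prompt string is arbitrary):

# Minimal standalone sketch of the app's generation call (no Streamlit).
# Assumption: the small "gpt2" checkpoint stands in for "gpt2-large".
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "See how a modern neural network auto-completes your text"
input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")

output_sequences = model.generate(
    input_ids=input_ids,
    max_length=30 + len(input_ids[0]),  # 30 new tokens on top of the prompt
    temperature=0.9,   # values below 1.0 sharpen the sampling distribution
    top_k=5,           # keep only the 5 most likely next tokens
    top_p=0.9,         # nucleus sampling cutoff
    do_sample=True,
    num_return_sequences=1,
)
print(tokenizer.decode(output_sequences[0].tolist(), clean_up_tokenization_spaces=True))

The Space itself launches with streamlit run app.py once streamlit, transformers, and torch are installed.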