Mel Nguyen (she/her) committed on
Commit a26c6e8
1 Parent(s): 457b136
Files changed (2)
  1. app.py +46 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,46 @@
+ import transformers
+ import streamlit as st
+
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("VietAI/gpt-neo-1.3B-vietnamese-news")
+ @st.cache
+ def load_model(model_name):
+     model = AutoModelForCausalLM.from_pretrained(model_name)
+     return model
+
+ model = load_model("VietAI/gpt-neo-1.3B-vietnamese-news")
+ def infer(input_ids, max_length):
+
+     output_sequences = model.generate(
+         input_ids=input_ids,
+         max_length=max_length,
+         do_sample=True,
+         temperature=0.9,
+         top_k=20,
+         #top_p=top_p,
+         #num_return_sequences=1
+     )
+
+     return output_sequences
+
+ default_value = "Have fun!"
+
+ st.title("Write with Transformers 🦄")
+ st.write("Generate Vietnamese text from a given prompt")
+
+ sent = st.text_area("Text", default_value, height=275)
+ max_length = st.sidebar.slider("Max Length", min_value=10, max_value=30)
+ #temperature = st.sidebar.slider("Temperature", value=1.0, min_value=0.0, max_value=1.0, step=0.05)
+ #top_k = st.sidebar.slider("Top-k", min_value=0, max_value=5, value=0)
+ #top_p = st.sidebar.slider("Top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
+
+ encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
+ if encoded_prompt.size()[-1] == 0:
+     input_ids = None
+ else:
+     input_ids = encoded_prompt
+
+ gen_tokens = infer(input_ids, max_length)
+ gen_text = tokenizer.batch_decode(gen_tokens)[0]
+ st.write(gen_text)
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers
+ streamlit
+ torch