UNIST-Eunchan committed
Commit: d17a7aa
Parent: a9ea355

Update app.py

Files changed (1): app.py (+7 -4)
app.py CHANGED
@@ -83,21 +83,24 @@ def chunking(book_text):
 '''
 '''
 
+_book = test_book[book_index]['book']
+
 #prompts
 st.title("Book Summarization 📚")
 st.write("The almighty king of text generation, GPT-2 comes in four available sizes, only three of which have been publicly made available. Feared for its fake news generation capabilities, it currently stands as the most syntactically coherent model. A direct successor to the original GPT, it reinforces the already established pre-training/fine-tuning killer duo. From the paper: Language Models are Unsupervised Multitask Learners by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever.")
 
 book_index = st.sidebar.slider("Select Book Example", value = 0,min_value = 0, max_value=4)
-
-_book = test_book[book_index]['book']
-chunked_segments = chunking(_book)
-
 sent = st.text_area("Text", _book[:512], height = 550)
 max_length = st.sidebar.slider("Max Length", value = 512,min_value = 10, max_value=1024)
 temperature = st.sidebar.slider("Temperature", value = 1.0, min_value = 0.0, max_value=1.0, step=0.05)
 top_k = st.sidebar.slider("Top-k", min_value = 0, max_value=5, value = 0)
 top_p = st.sidebar.slider("Top-p", min_value = 0.0, max_value=1.0, step = 0.05, value = 0.92)
 
+
+chunked_segments = chunking(_book)
+
+
+
 def generate_output(test_samples):
     inputs = tokenizer(
         test_samples,
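
The hunk ends part-way through generate_output, so the rest of that function is not part of this diff. For orientation only, the sketch below shows how the sidebar values (max_length, temperature, top_k, top_p) and a list like chunked_segments are typically wired into a Hugging Face generate() call; the checkpoint name and the summarize_chunks helper are illustrative assumptions, not code from this commit.

# Hypothetical sketch, not the app.py in this commit: shows how the slider
# values usually reach model.generate() in the transformers library.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

checkpoint = "facebook/bart-large-cnn"  # assumed summarization checkpoint for illustration
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

def summarize_chunks(chunks, max_length, temperature, top_k, top_p):
    # Tokenize the list of chunk strings into one padded batch.
    inputs = tokenizer(chunks, padding=True, truncation=True, return_tensors="pt")
    # Sample one summary per chunk with the slider-controlled decoding settings.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)

# e.g. summaries = summarize_chunks(chunked_segments, max_length, temperature, top_k, top_p)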