pszemraj commited on
Commit
e950125
1 Parent(s): 0cef1e2

updates-v2

Browse files
Files changed (2) hide show
  1. app.py +1 -2
  2. summarize.py +3 -3
app.py CHANGED
@@ -26,7 +26,6 @@ logging.basicConfig(
26
 
27
  MODEL_OPTIONS = [
28
  "pszemraj/led-large-book-summary",
29
- "pszemraj/led-large-book-summary-continued",
30
  "pszemraj/led-base-book-summary",
31
  ]
32
 
@@ -341,7 +340,7 @@ if __name__ == "__main__":
341
  "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a Colab notebook for a tutorial."
342
  )
343
  gr.Markdown(
344
- "- **Update May 1, 2023:** Enabled faster inference times via `use_cache=True`, the number of words the model will processed has been increased! New [test model](https://huggingface.co/pszemraj/led-large-book-summary-continued) as an extension of `led-large-book-summary`."
345
  )
346
  gr.Markdown("---")
347
 
26
 
27
  MODEL_OPTIONS = [
28
  "pszemraj/led-large-book-summary",
 
29
  "pszemraj/led-base-book-summary",
30
  ]
31
 
340
  "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a Colab notebook for a tutorial."
341
  )
342
  gr.Markdown(
343
+ "- **Update May 1, 2023:** Enabled faster inference times via `use_cache=True`, the number of words the model will process has been increased! Not on this demo, but there is a [test model](https://huggingface.co/pszemraj/led-large-book-summary-continued) available: an extension of `led-large-book-summary`."
344
  )
345
  gr.Markdown("---")
346
 
summarize.py CHANGED
@@ -127,7 +127,7 @@ def summarize_via_tokenbatches(
127
  in_id_arr, att_arr = encoded_input.input_ids, encoded_input.attention_mask
128
  gen_summaries = []
129
 
130
- pbar = tqdm(total=len(in_id_arr))
131
 
132
  for _id, _mask in zip(in_id_arr, att_arr):
133
  result, score = summarize_and_score(
@@ -144,9 +144,9 @@ def summarize_via_tokenbatches(
144
  "summary_score": score,
145
  }
146
  gen_summaries.append(_sum)
147
- print(f"\t{result[0]}\nScore:\t{score}")
148
  pbar.update()
149
 
150
  pbar.close()
151
-
152
  return gen_summaries
127
  in_id_arr, att_arr = encoded_input.input_ids, encoded_input.attention_mask
128
  gen_summaries = []
129
 
130
+ pbar = tqdm(total=len(in_id_arr), desc="Summarizing")
131
 
132
  for _id, _mask in zip(in_id_arr, att_arr):
133
  result, score = summarize_and_score(
144
  "summary_score": score,
145
  }
146
  gen_summaries.append(_sum)
147
+ logger.info(f"Score {score} for summary:\n\t{result}")
148
  pbar.update()
149
 
150
  pbar.close()
151
+ logger.debug(f"Generated summaries:\n{pp.pformat(gen_summaries)}")
152
  return gen_summaries