hiwei committed on
Commit
fcdc317
1 Parent(s): e50986b

paper_preview_demo: add an example and stream the result

Files changed (1)
apps/paper_preview.py  +26 -1
apps/paper_preview.py CHANGED
@@ -40,6 +40,9 @@ def paper_preview_demo(client):
         except Exception:
             yield traceback.format_exc()
 
+    def clear_data():
+        return None, None
+
     with gr.Row():
         with gr.Column():
             title = gr.Textbox(label="论文标题")
@@ -53,8 +56,30 @@ def paper_preview_demo(client):
 
         with gr.Column():
             outputs = gr.Textbox(label="速览内容", lines=5)
+            gr.Examples(
+                [
+                    [
+                        "GLM: General Language Model Pretraining with Autoregressive Blank Infilling",
+                        "There have been various types of pretraining architectures including autoencoding models "
+                        "(e.g., BERT), autoregressive models (e.g., GPT), and encoder-decoder models (e.g., T5). "
+                        "However, none of the pretraining frameworks performs the best for all tasks of three main "
+                        "categories including natural language understanding (NLU), unconditional generation, and "
+                        "conditional generation. We propose a General Language Model (GLM) based on autoregressive "
+                        "blank infilling to address this challenge. GLM improves blank filling pretraining by adding 2D"
+                        " positional encodings and allowing an arbitrary order to predict spans, which results in "
+                        "performance gains over BERT and T5 on NLU tasks. Meanwhile, GLM can be pretrained for "
+                        "different types of tasks by varying the number and lengths of blanks. On a wide range of tasks"
+                        " across NLU, conditional and unconditional generation, GLM outperforms BERT, T5, and GPT given"
+                        " the same model sizes and data, and achieves the best performance from a single pretrained "
+                        "model with 1.25x parameters of BERT Large , demonstrating its generalizability to different"
+                        " downstream tasks.",
+                    ]
+                ],
+                [title, abstract],
+                label="样例",
+            )
 
     submit.click(
         preview, inputs=[title, abstract, temperature, top_p], outputs=outputs
     )
-    clear.click(lambda x: (None, None, None), inputs=None, outputs=[title, abstract, outputs])
+    clear.click(clear_data, inputs=None, outputs=[title, abstract])
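For orientation, here is a minimal sketch of how the pieces touched by this commit could fit together in the surrounding Gradio Blocks app. Only the wiring of preview, clear_data, gr.Examples, and the two click handlers mirrors the diff above; everything else (the client.stream_chat call, the abstract, slider, and button components with their labels, and the shortened example abstract) is an assumption made for illustration, not the repository's actual code.

# Hypothetical sketch only; names not shown in the diff are placeholders.
import traceback

import gradio as gr


def paper_preview_demo(client):
    def preview(title, abstract, temperature, top_p):
        try:
            text = ""
            # `stream_chat` stands in for whatever streaming call `client` exposes.
            for chunk in client.stream_chat(
                f"{title}\n\n{abstract}", temperature=temperature, top_p=top_p
            ):
                text += chunk
                yield text  # each yield pushes the partial result into `outputs`
        except Exception:
            yield traceback.format_exc()

    def clear_data():
        # One value per wired output: title, abstract.
        return None, None

    with gr.Row():
        with gr.Column():
            title = gr.Textbox(label="论文标题")  # paper title
            abstract = gr.Textbox(label="论文摘要", lines=5)  # assumed label for the abstract box
            temperature = gr.Slider(0.0, 1.0, value=0.8, label="temperature")  # assumed range/default
            top_p = gr.Slider(0.0, 1.0, value=0.8, label="top_p")  # assumed range/default
            submit = gr.Button("Submit")  # assumed button labels
            clear = gr.Button("Clear")
        with gr.Column():
            outputs = gr.Textbox(label="速览内容", lines=5)  # streamed preview text
            gr.Examples(
                [[
                    "GLM: General Language Model Pretraining with Autoregressive Blank Infilling",
                    "Abstract of the GLM paper as quoted in the diff above.",  # shortened here
                ]],
                [title, abstract],
                label="样例",
            )

    submit.click(
        preview, inputs=[title, abstract, temperature, top_p], outputs=outputs
    )
    clear.click(clear_data, inputs=None, outputs=[title, abstract])


# The real app presumably calls this inside a gr.Blocks() context, e.g.:
#
#     with gr.Blocks() as demo:
#         paper_preview_demo(my_client)
#     demo.queue().launch()  # enabling the queue is typically required for generator handlers to stream

Two details worth noting about the change itself: clear_data returns exactly two values, one per wired output in [title, abstract], and because inputs=None means Gradio invokes the handler with no arguments, the removed lambda x: (None, None, None) would have failed at call time, which appears to be why the commit replaces it with a named helper.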