windmaple committed on
Commit
5741766
1 Parent(s): 1f3031b

Create new file

Files changed (1)
  1. quickstart_sst_demo.py +72 -0
quickstart_sst_demo.py ADDED
@@ -0,0 +1,72 @@
+ # Lint as: python3
+ r"""Quick-start demo for a sentiment analysis model.
+
+ This demo fine-tunes a small Transformer (BERT-tiny) on the Stanford Sentiment
+ Treebank (SST-2), and starts a LIT server.
+
+ To run locally:
+   python -m lit_nlp.examples.quickstart_sst_demo --port=5432
+
+ Training should take less than 5 minutes on a single GPU. Once you see the
+ ASCII-art LIT logo, navigate to localhost:5432 to access the demo UI.
+ """
+ import sys
+ import tempfile
+
+ from absl import app
+ from absl import flags
+ from absl import logging
+
+ from lit_nlp import dev_server
+ from lit_nlp import server_flags
+ from lit_nlp.examples.datasets import glue
+ from lit_nlp.examples.models import glue_models
+
+ # NOTE: additional flags defined in server_flags.py
+
+ FLAGS = flags.FLAGS
+
+ FLAGS.set_default("development_demo", True)
+
+ flags.DEFINE_string(
+     "encoder_name", "google/bert_uncased_L-2_H-128_A-2",
+     "Encoder name to use for fine-tuning. See https://huggingface.co/models.")
+
+ flags.DEFINE_string("model_path", None, "Path to save trained model.")
+
+
+ def get_wsgi_app():
+   """Returns a LitApp instance for consumption by gunicorn."""
+   FLAGS.set_default("server_type", "external")
+   FLAGS.set_default("demo_mode", True)
+   # Parse flags without calling app.run(main), to avoid conflict with
+   # gunicorn command line flags.
+   unused = flags.FLAGS(sys.argv, known_only=True)
+   return main(unused)
+
+
+ def run_finetuning(train_path):
+   """Fine-tune a transformer model."""
+   train_data = glue.SST2Data("train")
+   val_data = glue.SST2Data("validation")
+   model = glue_models.SST2Model(FLAGS.encoder_name)
+   model.train(train_data.examples, validation_inputs=val_data.examples)
+   model.save(train_path)
+
+
+ def main(_):
+   model_path = FLAGS.model_path or tempfile.mkdtemp()
+   logging.info("Working directory: %s", model_path)
+   run_finetuning(model_path)
+
+   # Load our trained model.
+   models = {"sst": glue_models.SST2Model(model_path)}
+   datasets = {"sst_dev": glue.SST2Data("validation")}
+
+   # Start the LIT server. See server_flags.py for server options.
+   lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
+   return lit_demo.serve()
+
+
+ if __name__ == "__main__":
+   app.run(main)
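
Once training finishes, the saved checkpoint can also be reused outside the LIT UI. The snippet below is a minimal sketch and is not part of this commit: MODEL_PATH is a hypothetical directory previously passed as --model_path (or the tempdir logged by main()), and it assumes the SST2Model wrapper exposes the standard LIT Model.predict() interface with a "probas" field in each prediction.

from lit_nlp.examples.datasets import glue
from lit_nlp.examples.models import glue_models

MODEL_PATH = "/tmp/sst_model"  # hypothetical: directory written by this script

# Load the fine-tuned checkpoint the same way main() does above.
model = glue_models.SST2Model(MODEL_PATH)

# Run a few validation examples through the model.
examples = glue.SST2Data("validation").examples[:4]
for ex, pred in zip(examples, model.predict(examples)):
  # Assumes the output spec includes "probas" (per-class probabilities).
  print(ex["sentence"], pred["probas"])

For deployment rather than ad-hoc prediction, the get_wsgi_app() function in the file above is intended for consumption by gunicorn, which parses flags directly instead of going through app.run(main).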