pcuenq HF staff committed on
Commit
a7a8043
1 Parent(s): 222ea66

Improve the 3-class sentence-based model.

Browse files

In the previous training we inadvertently used a sequence length of just `20` tokens,
which is what `AutoConfig.from_pretrained` defaults to. This model has been trained
with up to 512 tokens, better utilizing RoBERTa's capabilities.

F1 macro score is now `0.6951`.

Files changed (2) hide show
  1. config.json +2 -1
  2. pytorch_model.bin +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "./artifacts/model-36sxpfct-2744-0.5888:v0",
3
  "architectures": [
4
  "RobertaForSequenceClassification"
5
  ],
@@ -24,6 +24,7 @@
24
  "intermediate": 1
25
  },
26
  "layer_norm_eps": 1e-05,
 
27
  "max_position_embeddings": 514,
28
  "model_type": "roberta",
29
  "num_attention_heads": 12,
1
  {
2
+ "_name_or_path": "./artifacts/model-1qe3kbqj-2742-0.6951:v0",
3
  "architectures": [
4
  "RobertaForSequenceClassification"
5
  ],
24
  "intermediate": 1
25
  },
26
  "layer_norm_eps": 1e-05,
27
+ "max_length": 512,
28
  "max_position_embeddings": 514,
29
  "model_type": "roberta",
30
  "num_attention_heads": 12,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:56ad453cd87d0c3835c6811c196e8dfbb0f3063cfa59226d8dbc686bdef31f16
3
  size 498675309
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26854ee4fa633135cd6bc6f190398ae0cca099a8d3e9adeb54c94bb61a536530
3
  size 498675309