KoichiYasuoka committed
Commit 3c0c29b
1 Parent(s): 05e3e0c

model improved

Files changed (6):
  1. README.md +2 -1
  2. config.json +0 -0
  3. pytorch_model.bin +2 -2
  4. supar.model +2 -2
  5. tokenizer.json +0 -0
  6. tokenizer_config.json +1 -1
README.md CHANGED
@@ -11,7 +11,8 @@ datasets:
 license: "cc-by-sa-4.0"
 pipeline_tag: "token-classification"
 widget:
-- text: "ⲙⲟⲟϣⲉϩⲱⲥϣⲏⲣⲉⲙⲡⲟⲩⲟⲉⲓⲛ"
+- text: "ⲧⲉⲛⲟⲩⲇⲉⲛ̄ⲟⲩⲟⲉⲓⲛϩ︤ⲙ︥ⲡϫⲟⲉⲓⲥ·"
+- text: "ⲙⲟⲟϣⲉϩⲱⲥϣⲏⲣⲉⲙ̄ⲡⲟⲩⲟⲉⲓⲛ·"
 ---

 # roberta-small-coptic-upos
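The widget gains two sentences whose supralinear strokes are written as combining marks, replacing the single unaccented example. A minimal smoke test of the card's token-classification pipeline, assuming the model lives at KoichiYasuoka/roberta-small-coptic-upos (the repo id is inferred from the committer name and the README heading):

```python
from transformers import pipeline

# Assumed repo id; the task tag comes straight from the model card.
nlp = pipeline("token-classification",
               model="KoichiYasuoka/roberta-small-coptic-upos")

# One of the widget sentences added in this commit.
for token in nlp("ⲧⲉⲛⲟⲩⲇⲉⲛ̄ⲟⲩⲟⲉⲓⲛϩ︤ⲙ︥ⲡϫⲟⲉⲓⲥ·"):
    print(token["word"], token["entity"])
```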
config.json CHANGED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85b375ca0167ef1a9d45637bf2e84ca8c719550b40a2940977be4410b7db066c
-size 36995441
+oid sha256:c2dfb82141540adb7cc6a376223c3cacbfe93386c667b898d41b1ee67070cea8
+size 36993393
supar.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d390bb8e7b1b913e9fbaf2a95a26971181239dc961900ca3f58724fb0caba28b
-size 80700325
+oid sha256:e769002eb656b09dc4013164acee817c5cf051df31eea5772665c563862404c3
+size 80700453
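Both binaries are stored as Git LFS pointers, so the diffs above show only the new sha256 oid and byte size. A downloaded file can be checked against its pointer; a minimal sketch (not part of the commit) using the new pytorch_model.bin values from this diff:

```python
import hashlib
import os

# Expected values copied from the new LFS pointer in this commit.
path = "pytorch_model.bin"
expected_oid = "c2dfb82141540adb7cc6a376223c3cacbfe93386c667b898d41b1ee67070cea8"
expected_size = 36993393

assert os.path.getsize(path) == expected_size, "size mismatch"
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("file matches its LFS pointer")
```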
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": false, "remove_space": true, "keep_accents": true, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 128, "tokenizer_class": "RemBertTokenizerFast"}
+{"do_lower_case": false, "remove_space": true, "keep_accents": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 128, "tokenizer_class": "RemBertTokenizerFast"}