tugstugi committed
Commit 3ad751d
1 Parent(s): ee9a2fb

update README

Files changed (2)
  1. README.md +9 -3
  2. config.json +1 -2
README.md CHANGED
@@ -19,10 +19,10 @@ This repository is based on the following open source projects: [google-research
 #### How to use
 
 ```python
-from transformers import pipeline, AutoTokenizer, BertForMaskedLM
+from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM
 
-tokenizer = AutoTokenizer.from_pretrained('tugstugi/bert-base-mongolian-uncased')
-model = BertForMaskedLM.from_pretrained('tugstugi/bert-base-mongolian-uncased')
+tokenizer = AutoTokenizer.from_pretrained('tugstugi/bert-base-mongolian-uncased', use_fast=False)
+model = AutoModelForMaskedLM.from_pretrained('tugstugi/bert-base-mongolian-uncased')
 
 ## declare task ##
 pipe = pipeline(task="fill-mask", model=model, tokenizer=tokenizer)
@@ -34,6 +34,12 @@ output_ = pipe(input_)
 for i in range(len(output_)):
     print(output_[i])
 
+## output ##
+#{'sequence': 'миний хувьд хоол идэх нь тун чухал.', 'score': 0.7889143824577332, 'token': 126, 'token_str': 'хувьд'}
+#{'sequence': 'миний бодлоор хоол идэх нь тун чухал.', 'score': 0.18616807460784912, 'token': 6106, 'token_str': 'бодлоор'}
+#{'sequence': 'миний зүгээс хоол идэх нь тун чухал.', 'score': 0.004825591575354338, 'token': 761, 'token_str': 'зүгээс'}
+#{'sequence': 'миний биед хоол идэх нь тун чухал.', 'score': 0.0015743684489279985, 'token': 3010, 'token_str': 'биед'}
+#{'sequence': 'миний тухайд хоол идэх нь тун чухал.', 'score': 0.0014919431414455175, 'token': 1712, 'token_str': 'тухайд'}
 ```
 
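For reference, the updated snippet can be run end to end as below. The `input_` definition is not part of this hunk; the masked sentence is reconstructed from the example outputs above (using `tokenizer.mask_token` rather than a hardcoded mask string), so treat it as an assumption rather than the README's exact wording:

```python
from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM

# load the slow SentencePiece tokenizer and the masked-LM head
tokenizer = AutoTokenizer.from_pretrained('tugstugi/bert-base-mongolian-uncased', use_fast=False)
model = AutoModelForMaskedLM.from_pretrained('tugstugi/bert-base-mongolian-uncased')

## declare task ##
pipe = pipeline(task="fill-mask", model=model, tokenizer=tokenizer)

## example input (reconstructed from the outputs above, not taken from the diff) ##
input_ = f'миний {tokenizer.mask_token} хоол идэх нь тун чухал.'

output_ = pipe(input_)
for i in range(len(output_)):
    print(output_[i])
```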
config.json CHANGED
@@ -17,6 +17,5 @@
   "pad_token_id": 0,
   "type_vocab_size": 2,
   "vocab_size": 32000,
-  "tokenizer_class": "AlbertTokenizer",
-  "transformers_version": "3.5.1"
+  "tokenizer_class": "AlbertTokenizer"
 }
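The dropped `transformers_version` field is informational metadata recorded when the config was saved; the `tokenizer_class` entry is what lets `AutoTokenizer` resolve this BERT checkpoint to the SentencePiece-based `AlbertTokenizer`. A minimal sketch to check that resolution locally (assumes `transformers` and `sentencepiece` are installed):

```python
from transformers import AutoConfig, AutoTokenizer

# config.json now carries "tokenizer_class": "AlbertTokenizer"
config = AutoConfig.from_pretrained('tugstugi/bert-base-mongolian-uncased')
print(config.tokenizer_class)    # expected: AlbertTokenizer

# with use_fast=False, AutoTokenizer should return the slow AlbertTokenizer
tokenizer = AutoTokenizer.from_pretrained('tugstugi/bert-base-mongolian-uncased', use_fast=False)
print(type(tokenizer).__name__)  # expected: AlbertTokenizer
```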