xinhe committed
Commit db4d7af
1 parent: 0d24f9d
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "./tmp/sst2_output",
+  "_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english",
   "activation": "gelu",
   "architectures": [
     "DistilBertForSequenceClassification"
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16c2421d9ce522d5c855b58d7e4379dbe9f5a5e6f824dd42e0509e00b9eab0ee
-size 68184481
+oid sha256:92efc824e8ec5d2c95c2c862e06ad93a465e0a23c55207d0905d27a5a2c0d7d1
+size 68199393
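The pytorch_model.bin change swaps the Git LFS pointer to a new weights blob (new sha256, 68199393 bytes). As a hedged sketch, not part of the commit itself, a downloaded copy of the weights can be verified against the oid recorded in the pointer above:

import hashlib

# Hash the downloaded weights file and compare with the sha256 from the LFS pointer.
with open("pytorch_model.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

print(digest == "92efc824e8ec5d2c95c2c862e06ad93a465e0a23c55207d0905d27a5a2c0d7d1")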
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-uncased-finetuned-sst-2-english", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "DistilBertTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bf6bf149cc70ad27ced23f0702b9fa444eaff306692a54b65aba89dfe3d797b
+size 2991
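training_args.bin is another small LFS-tracked binary (2991 bytes). Files with this name are typically a pickled transformers TrainingArguments object, so, as a hedged sketch rather than anything this commit documents, it can be inspected with torch; recent torch releases need weights_only=False to unpickle arbitrary objects:

import torch

# Unpickle the saved TrainingArguments (assumes transformers is installed).
args = torch.load("training_args.bin", weights_only=False)
print(args)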
vocab.txt ADDED
The diff for this file is too large to render.