af1tang committed on
Commit 7b13971
1 Parent(s): 7332f20

weights update

Files changed (6)
  1. added_tokens.json +1 -1
  2. config.json +1 -1
  3. merges.txt +1 -1
  4. pytorch_model.bin +1 -1
  5. tokenizer_config.json +1 -1
  6. vocab.json +0 -0
added_tokens.json CHANGED
@@ -1 +1 @@
- {"<|sep|>": 50257, "<|act|>": 50262, "<|cls|>": 50258, "<|start|>": 50259, "<|p1|>": 50260, "<|p2|>": 50261}
+ {"<|sep|>": 50257, "<|cls|>": 50258, "<|start|>": 50259, "<|p1|>": 50260, "<|p2|>": 50261, "<|act|>": 50262}
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "af1tang/personaGPT",
+ "_name_or_path": "/home/af1tang/convogym/checkpoint/model/",
  "activation_function": "gelu_new",
  "architectures": [
  "GPT2LMHeadModel"
merges.txt CHANGED
@@ -1,4 +1,4 @@
- #version: 0.2 - Trained by `huggingface/tokenizers`
+ #version: 0.2
  Ġ t
  Ġ a
  h e
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7397c2e647a7d4e328b67f4d5cac13cb32713a6282a94e41db2ed44782aad2e3
+ oid sha256:bba0725e119469e2773c082b1ff949e5f249df88fa2eab2e1bbd37e28b1627d6
  size 1444556021
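
The file itself is a Git LFS pointer; a small sketch (assuming the weights have already been fetched locally, e.g. via `git lfs pull`) of verifying the downloaded file against the new oid:

```python
import hashlib

# Local path to the pulled weights file (assumption: repo cloned with LFS objects fetched).
path = "pytorch_model.bin"

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Should equal the oid recorded in the updated LFS pointer.
print(sha256.hexdigest() == "bba0725e119469e2773c082b1ff949e5f249df88fa2eab2e1bbd37e28b1627d6")
```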
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "pad_token": "<|endoftext|>", "cls_token": "<|cls|>", "sep_token": "<|sep|>", "special_tokens_map_file": null, "full_tokenizer_file": null, "name_or_path": "af1tang/personaGPT", "errors": "replace", "tokenizer_class": "GPT2Tokenizer"}
+ {"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "pad_token": "<|endoftext|>", "cls_token": "<|cls|>", "sep_token": "<|sep|>", "special_tokens_map_file": null, "full_tokenizer_file": null, "tokenizer_file": "/home/af1tang/convogym/checkpoint/model/tokenizer.json", "name_or_path": "/home/af1tang/convogym/checkpoint/model/", "tokenizer_class": "GPT2Tokenizer"}
vocab.json CHANGED
The diff for this file is too large to render.