MrD05 committed on
Commit
ec25fea
1 Parent(s): 1b42e9f

Upload 7 files

Files changed (7)
  1. code/inference.py +21 -0
  2. config.json +28 -0
  3. gptj.pt +3 -0
  4. merges.txt +0 -0
  5. special_tokens_map.json +1 -0
  6. tokenizer_config.json +1 -0
  7. vocab.json +0 -0
code/inference.py ADDED
@@ -0,0 +1,21 @@
+ import os
+ import torch
+ from transformers import AutoTokenizer, pipeline
+
+ GPT_WEIGHTS_NAME = "gpt.pt"
+
+
+ def model_fn(model_dir):
+     model = torch.load(os.path.join(model_dir, GPT_WEIGHTS_NAME))
+     tokenizer = AutoTokenizer.from_pretrained(model_dir)
+
+     if torch.cuda.is_available():
+         device = 0
+     else:
+         device = -1
+
+     generation = pipeline(
+         "text-generation", model=model, tokenizer=tokenizer, device=device
+     )
+
+     return generation
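The script defines only model_fn, the hook a SageMaker-style Hugging Face inference container calls once at start-up to build the text-generation pipeline. A companion predict_fn is not part of this commit; the sketch below is a minimal, assumed pairing in which predict_fn receives the deserialized request plus the object model_fn returned, and the "inputs"/"parameters" request keys are likewise assumptions.

# Hypothetical companion handler -- not included in this commit.
# Assumes predict_fn(data, model) receives the deserialized JSON payload
# and the pipeline returned by model_fn above.
def predict_fn(data, generation):
    prompt = data.pop("inputs", data)        # assumed request key
    parameters = data.pop("parameters", {})  # e.g. max_new_tokens, temperature
    return generation(prompt, **parameters)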
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "opt-350m",
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPTForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "do_layer_norm_before": false,
+   "dropout": 0.1,
+   "eos_token_id": 2,
+   "ffn_dim": 4096,
+   "hidden_size": 1024,
+   "init_std": 0.02,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 2048,
+   "model_type": "opt",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "prefix": "</s>",
+   "torch_dtype": "float16",
+   "transformers_version": "4.20.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50272,
+   "word_embed_proj_dim": 512
+ }
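This config.json describes an OPT-350m decoder (24 layers, hidden size 1024, 16 attention heads, fp16 weights). As a quick sanity check, the architecture it declares can be instantiated without any checkpoint via the transformers Auto classes; in the sketch below, "model_dir" is an assumed local directory holding the files from this commit.

from transformers import AutoConfig, AutoModelForCausalLM

# Sketch: build the OPT skeleton described by config.json with randomly
# initialized weights (no checkpoint is loaded here).
config = AutoConfig.from_pretrained("model_dir")  # assumed local path
model = AutoModelForCausalLM.from_config(config)
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # opt 1024 24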
gptj.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d96052c3bc47377d93e0350190b6d057c491d683f515c87c024576b979454990
+ size 662549735
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "unk_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "add_bos_token": true, "special_tokens_map_file": null, "name_or_path": "patrickvonplaten/opt-30b"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff
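Together, vocab.json, merges.txt, special_tokens_map.json and tokenizer_config.json define a byte-pair-encoding tokenizer (the tokenizer_config points back at patrickvonplaten/opt-30b, with </s> used as bos/eos/unk and <pad> for padding). A small sketch of loading it from the committed files, assuming they sit in a local directory "model_dir":

from transformers import AutoTokenizer

# Sketch: AutoTokenizer picks up vocab.json, merges.txt, special_tokens_map.json
# and tokenizer_config.json from the directory. "model_dir" is an assumption.
tokenizer = AutoTokenizer.from_pretrained("model_dir")
enc = tokenizer("Hello world", return_tensors="pt")
print(enc["input_ids"], tokenizer.decode(enc["input_ids"][0]))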