Laeyoung Chang commited on
Commit
168c0e1
1 Parent(s): f681b34

upload model

Browse files
MAR-INF/MANIFEST.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "createdOn": "08/06/2021 07:26:17",
3
+ "runtime": "python",
4
+ "model": {
5
+ "modelName": "gpt-2-en-small-finetune",
6
+ "serializedFile": "pytorch_model.bin",
7
+ "handler": "handler.py",
8
+ "modelVersion": "1.0"
9
+ },
10
+ "archiverVersion": "0.3.0"
11
+ }
config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/model",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "gradient_checkpointing": false,
12
+ "initializer_range": 0.02,
13
+ "layer_norm_epsilon": 1e-05,
14
+ "model_type": "gpt2",
15
+ "n_ctx": 1024,
16
+ "n_embd": 768,
17
+ "n_head": 12,
18
+ "n_inner": null,
19
+ "n_layer": 12,
20
+ "n_positions": 1024,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_weights": true,
23
+ "summary_activation": null,
24
+ "summary_first_dropout": 0.1,
25
+ "summary_proj_to_labels": true,
26
+ "summary_type": "cls_index",
27
+ "summary_use_proj": true,
28
+ "task_specific_params": {
29
+ "text-generation": {
30
+ "do_sample": true,
31
+ "max_length": 50
32
+ }
33
+ },
34
+ "transformers_version": "4.6.1",
35
+ "use_cache": true,
36
+ "vocab_size": 50257
37
+ }
handler.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gc
3
+ from ts.torch_handler.base_handler import BaseHandler
4
+ from transformers import GPT2LMHeadModel
5
+
6
+ import logging
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
class SampleTransformerModel(BaseHandler):
    """TorchServe handler that samples text from a fine-tuned GPT-2 LM.

    Expected request body (per request):
      - "text":        list of input token ids (passed to ``torch.tensor`` as-is
                       — presumably already tokenized by the client; TODO confirm)
      - "num_samples": number of sequences to return from ``generate``
      - "length":      number of tokens to generate beyond the prompt
    """

    def __init__(self):
        super().__init__()
        self.model = None
        self.device = None
        self.initialized = False

    def load_model(self, model_dir):
        """Load the serialized GPT-2 LM-head model from *model_dir* onto ``self.device``."""
        self.model = GPT2LMHeadModel.from_pretrained(model_dir, return_dict=True)
        self.model.to(self.device)

    def initialize(self, ctx):
        """TorchServe entry point: resolve device, load model, switch to eval mode.

        :param ctx: TorchServe context; only ``system_properties`` is used.
        """
        properties = ctx.system_properties
        model_dir = properties.get("model_dir")
        # NOTE(review): if "gpu_id" were absent this would build "cuda:None";
        # assumes TorchServe always supplies gpu_id when CUDA is available — confirm.
        self.device = torch.device(
            "cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() else "cpu"
        )

        self.load_model(model_dir)

        self.model.eval()
        self.initialized = True

    def preprocess(self, requests):
        """Build the generation inputs from the incoming request batch.

        BUG FIX: the original looped over every request but overwrote the same
        dict keys on each iteration, so only the last request was ever used.
        The last-request semantics are kept (identical observable behavior)
        but made explicit, and truncated batches are now logged.

        :param requests: list of TorchServe request dicts with a "body" key.
        :return: dict with "input_ids" (tensor on ``self.device``),
                 "num_samples" and "length" (prompt length + requested length,
                 since ``generate``'s ``max_length`` counts the prompt).
        """
        if len(requests) > 1:
            logger.warning(
                "Received %d requests but only the last is processed; "
                "run this handler with batch size 1.", len(requests)
            )
        body = requests[-1].get("body")
        prompt = body.get("text")
        input_batch = {
            "input_ids": torch.tensor([prompt]).to(self.device),
            "num_samples": body.get("num_samples"),
            "length": body.get("length") + len(prompt),
        }
        # Drop the raw requests before generation to keep peak memory down.
        del requests
        gc.collect()
        return input_batch

    def inference(self, input_batch):
        """Run sampled generation and return the raw output token tensor."""
        input_ids = input_batch["input_ids"]
        length = input_batch["length"]

        inference_output = self.model.generate(
            input_ids,
            bos_token_id=self.model.config.bos_token_id,
            eos_token_id=self.model.config.eos_token_id,
            # GPT-2 has no dedicated pad token; EOS is reused for padding.
            pad_token_id=self.model.config.eos_token_id,
            do_sample=True,
            max_length=length,
            top_k=50,
            top_p=0.95,
            no_repeat_ngram_size=2,
            num_return_sequences=input_batch["num_samples"],
        )

        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        del input_batch
        gc.collect()
        return inference_output

    def postprocess(self, inference_output):
        """Convert the generated token tensor into a JSON-serializable response.

        Returns a single-element list because this handler serves exactly one
        request per batch (see ``preprocess``); TorchServe expects one response
        entry per request.
        """
        output = inference_output.cpu().numpy().tolist()
        del inference_output
        gc.collect()
        return [output]

    def handle(self, data, context):
        """Full request lifecycle: preprocess -> inference -> postprocess."""
        data = self.preprocess(data)
        data = self.inference(data)
        data = self.postprocess(data)
        return data
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:502013578c9ea5ad0e8fc054739b8ade4d931710ff0e69b1ecc0f43dc344c6d6
3
+ size 510408315
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff