WinterGYC committed on
Commit 84b2f3a
1 Parent(s): c16beac

Create handler.py

Files changed (1)
  1. handler.py +23 -0
handler.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+ from typing import Dict, List, Any
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from transformers.generation.utils import GenerationConfig
+
+ # Use bfloat16 on GPUs with compute capability 8.x or newer (e.g. Ampere), float16 otherwise.
+ dtype = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # Load the model and tokenizer from the repository path.
+         self.model = AutoModelForCausalLM.from_pretrained(path, device_map="auto", torch_dtype=dtype, trust_remote_code=True)
+         self.model.generation_config = GenerationConfig.from_pretrained(path)
+         self.tokenizer = AutoTokenizer.from_pretrained(path, use_fast=False, trust_remote_code=True)
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
+         inputs = data.pop("inputs", data)
+         # Request parameters are ignored; generation defaults come from generation_config.json.
+         messages = [{"role": "user", "content": inputs}]
+         response = self.model.chat(self.tokenizer, messages)
+         if torch.backends.mps.is_available():
+             torch.mps.empty_cache()
+         return [{"generated_text": response}]