Update handler.py
handler.py (+4 -4)
@@ -9,13 +9,13 @@ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
 class EndpointHandler:
     def __init__(self, path=""):
         # load the model
-
-
-
+        self.tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", use_fast=False, trust_remote_code=True)
+        self.model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan-13B-Chat", device_map="auto", torch_dtype=dtype, trust_remote_code=True)
+        self.model.generation_config = GenerationConfig.from_pretrained("baichuan-inc/Baichuan-13B-Chat")

     def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
         inputs = data.pop("inputs", data)
         # ignoring parameters! Default to configs in generation_config.json.
         messages = [{"role": "user", "content": inputs}]
-        response =
+        response = self.model.chat(self.tokenizer, messages)
         return [{'generated_text': response}]
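The updated handler follows the Hugging Face custom-handler contract: it receives a dict with an "inputs" key and returns a list of generated-text dicts. A minimal local smoke test is sketched below; it is not part of the commit, and it assumes handler.py is importable, that its top-of-file imports (Any, Dict, List, torch, AutoTokenizer, AutoModelForCausalLM, GenerationConfig) are in place, and that a CUDA GPU with enough memory for the 13B weights is available.

from handler import EndpointHandler

# __init__ downloads and loads Baichuan-13B-Chat, so first run is slow;
# the dtype is chosen from the GPU's compute capability in handler.py.
handler = EndpointHandler()

# __call__ pops "inputs" from the payload, wraps it in a single-turn
# chat message, and delegates generation to Baichuan's chat() helper.
result = handler({"inputs": "What is the capital of France?"})
print(result)  # e.g. [{'generated_text': '...'}]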