from typing import Dict, List, Any
import torch
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

class EndpointHandler:
    def __init__(self, path=""):
        # Preload everything needed at inference time so that each request
        # only pays for tokenization and generation.
        self.device = "cuda:0"
        self.tokenizer = AutoTokenizer.from_pretrained(
            "philschmid/falcon-40b-instruct-GPTQ-inference-endpoints",
            use_fast=False,
        )
        self.model = AutoGPTQForCausalLM.from_quantized(
            "philschmid/falcon-40b-instruct-GPTQ-inference-endpoints",
            device=self.device,
            use_triton=False,
            use_safetensors=True,
            torch_dtype=torch.float32,
            trust_remote_code=True,
        )

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (:obj:`dict`):
                inputs (:obj:`str`): the prompt to run generation on
                parameters (:obj:`dict`, optional): kwargs forwarded to
                    ``model.generate``
        Return:
            A :obj:`list` of :obj:`dict` that will be serialized and
            returned to the client.
        """
        # process input
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)

        # preprocess: tokenize the prompt and move the ids onto the model's device
        input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids.to(self.device)

        # run generation with any user-supplied kwargs
        if parameters is not None:
            outputs = self.model.generate(input_ids, **parameters)
        else:
            outputs = self.model.generate(input_ids)

        # postprocess: decode the full sequence (prompt + completion) to text
        prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

        return [{"generated_text": prediction}]