from typing import Any, Dict

from transformers import Pipeline, PreTrainedModel
from transformers.utils import ModelOutput


class GenPipeline(Pipeline):
    """Minimal Pipeline subclass that prints which hook is invoked at each step."""

    def __init__(self, model: PreTrainedModel, **kwargs):
        super().__init__(model=model, **kwargs)
        print("in __init__")

    def __call__(self, question: str, **kwargs) -> Dict[str, Any]:
        print("in __call__")

        inputs = {"question": question}

        # Note: calling the model directly here bypasses the usual
        # preprocess -> _forward -> postprocess chain of Pipeline.__call__.
        outputs = self.model(**inputs)

        return self._process_output(outputs)

    def _process_output(self, outputs: Any) -> Dict[str, Any]:
        print("in _process_output")
        # The model is expected to return a (guess, confidence) pair.
        return {"guess": outputs[0], "confidence": outputs[1]}

    def _sanitize_parameters(self, **kwargs):
        print("in _sanitize_parameters")
        # No extra parameters: empty kwargs for preprocess, _forward and postprocess.
        return {}, {}, {}

    def preprocess(self, inputs):
        print("in preprocess")
        return inputs

    def postprocess(self, outputs):
        print("in postprocess")
        return {"guess": outputs[0], "confidence": float(outputs[1])}

    def _forward(self, input_tensors, **forward_parameters: Dict) -> ModelOutput:
        print("in _forward")
        return super()._forward(input_tensors, **forward_parameters)
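

# --- Usage sketch (not part of the original file) ---
# Hedged illustration of how GenPipeline might be constructed and called.
# The checkpoint name is a placeholder, and GenPipeline.__call__ assumes the
# underlying model's forward accepts a `question` keyword and returns a
# (guess, confidence) pair; a stock AutoModel does not, so a custom model
# class implementing that interface would be needed in practice.
if __name__ == "__main__":
    from transformers import AutoModel, AutoTokenizer

    checkpoint = "path/to/custom-qa-model"  # hypothetical checkpoint
    model = AutoModel.from_pretrained(checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    pipe = GenPipeline(model=model, tokenizer=tokenizer)
    answer = pipe("What is the capital of France?")
    print(answer)  # expected shape: {"guess": ..., "confidence": ...}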