tkempto1 committed on
Commit
a9c20d4
·
verified ·
1 Parent(s): 0bd5e23

Upload DemoT5QAPipeline

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. config.json +1 -1
  3. gen_model_pipe.py +39 -0
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- library_name: transformers
3
  license: mit
 
4
  ---
5
 
6
  # Model Card for Model ID
 
1
  ---
 
2
  license: mit
3
+ library_name: transformers
4
  ---
5
 
6
  # Model Card for Model ID
config.json CHANGED
@@ -6,7 +6,7 @@
6
  "classifier_dropout": 0.0,
7
  "custom_pipelines": {
8
  "tyler-t5-qa": {
9
- "impl": "tyler_t5_model_pipe.DemoT5QAPipeline",
10
  "pt": [
11
  "AutoModelForSeq2SeqLM"
12
  ],
 
6
  "classifier_dropout": 0.0,
7
  "custom_pipelines": {
8
  "tyler-t5-qa": {
9
+ "impl": "gen_model_pipe.DemoT5QAPipeline",
10
  "pt": [
11
  "AutoModelForSeq2SeqLM"
12
  ],
gen_model_pipe.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+ import numpy as np
3
+ from transformers import Text2TextGenerationPipeline
4
+
5
class DemoT5QAPipeline(Text2TextGenerationPipeline):
    """Text2text QA pipeline that also reports a confidence score.

    Extends the stock ``Text2TextGenerationPipeline`` so that generation
    keeps the per-step scores, which ``postprocess`` folds into a single
    sequence probability returned alongside the generated answer text.
    """

    def _forward(self, model_inputs, **generate_kwargs):
        """Run generation, keeping sequences and scores for postprocessing.

        Returns a dict with:
          - ``"output_ids"``: sequences regrouped per input example
          - ``"output_sequences"``: raw generated sequences
          - ``"output_scores"``: per-step generation scores
        """
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        self.check_inputs(
            input_length,
            generate_kwargs.get("min_length", self.model.config.min_length),
            generate_kwargs.get("max_length", self.model.config.max_length),
        )
        # Default rather than hard-code: passing max_new_tokens both here and
        # via **generate_kwargs would raise "got multiple values for keyword
        # argument 'max_new_tokens'". setdefault keeps the original default
        # of 75 while letting callers override it.
        generate_kwargs.setdefault("max_new_tokens", 75)
        outputs = self.model.generate(
            **model_inputs,
            **generate_kwargs,
            return_dict_in_generate=True,
            output_scores=True,
        )

        # Code from the parent class: group generated candidates by input
        # example, i.e. reshape (in_b * k, ...) -> (in_b, k, ...).
        output_ids = outputs.sequences
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))

        return {
            "output_ids": output_ids,
            "output_sequences": outputs.sequences,
            "output_scores": outputs.scores,
        }

    def postprocess(self, model_outputs):
        """Decode the answer and attach its sequence probability.

        Returns ``{"guess": str, "confidence": float}`` where confidence is
        the product of the (rounded) per-token probabilities of the first
        generated sequence.
        """
        guess_text = super().postprocess(model_outputs)[0]["generated_text"]

        # Per-token log-probabilities of the generated tokens.
        transition_scores = self.model.compute_transition_scores(
            model_outputs["output_sequences"],
            model_outputs["output_scores"],
            normalize_logits=True,
        )
        # NOTE(review): .cpu() assumes a PyTorch tensor — the "tf" branch in
        # _forward would break here; confirm this pipeline only runs with pt.
        # exp() turns log-probs into probabilities, so name them accordingly
        # (the original called these "log_probs", which they are not).
        token_probs = np.round(np.exp(transition_scores.cpu().numpy()), 3)[0]
        # np.prod replaces np.product, which was deprecated and removed in
        # NumPy 2.0.
        guess_prob = np.prod(token_probs)

        return {"guess": guess_text, "confidence": guess_prob}