Update README.md
README.md CHANGED
@@ -63,7 +63,8 @@ labels = ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring',
 
 tokenizer = Tokenizer.from_pretrained("SamLowe/roberta-base-go_emotions")
 
-#
+# Optional - set pad to only pad to longest in batch, not a fixed length.
+# (without this, the model will run slower, esp for shorter input strings)
 params = {**tokenizer.padding, "length": None}
 tokenizer.enable_padding(**params)
 
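To make the first hunk's change concrete, here is a rough sketch of the padding setup it documents, assuming the saved tokenizer config already carries padding settings (so tokenizer.padding is a dict); the example sentences are made up for illustration:

from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("SamLowe/roberta-base-go_emotions")

# Reuse the saved pad token / pad id, but drop the fixed target length so each
# batch is padded only to its own longest sequence (dynamic padding).
params = {**tokenizer.padding, "length": None}
tokenizer.enable_padding(**params)

# Illustrative inputs: the short one is no longer padded out to a fixed maximum,
# only to the length of the longest item in this batch.
encodings = tokenizer.encode_batch(["I am happy", "A much longer sentence about nothing in particular"])
print([len(enc.ids) for enc in encodings])

With length set to None, each batch is padded only to its own longest sequence, which is where the speed-up for short inputs comes from.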
@@ -87,8 +88,11 @@ def sigmoid(_outputs):
     return 1.0 / (1.0 + np.exp(-_outputs))
 
 logits = model.run(output_names=output_names, input_feed=input_feed_dict)[0]
+# produces a numpy array, one row per input item, one col per label
 
-model_outputs = sigmoid(logits)
+# Post-processing. Gets the scores per label in range.
+# Auto done by Transformers' pipeline, but we must do it manually with ORT.
+model_outputs = sigmoid(logits)
 
 # for example, just to show the top result per input item
 for probas in model_outputs:
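Likewise, a minimal sketch of the inference and post-processing flow the second hunk annotates, assuming the ONNX export of the model is saved locally as model.onnx, the tokenizer is set up as above, and the export uses the usual input_ids / attention_mask input names; the path, input names, and sentences are illustrative, and labels refers to the list defined earlier in the README (partially visible in the first hunk header):

import numpy as np
import onnxruntime as ort
from tokenizers import Tokenizer

def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))

tokenizer = Tokenizer.from_pretrained("SamLowe/roberta-base-go_emotions")
tokenizer.enable_padding(**{**tokenizer.padding, "length": None})

# "model.onnx" is a placeholder path for the exported model; adjust as needed.
model = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
output_names = [o.name for o in model.get_outputs()]

sentences = ["I am not having a great day"]  # illustrative input
encodings = tokenizer.encode_batch(sentences)
input_feed_dict = {
    "input_ids": np.array([enc.ids for enc in encodings], dtype=np.int64),
    "attention_mask": np.array([enc.attention_mask for enc in encodings], dtype=np.int64),
}

logits = model.run(output_names=output_names, input_feed=input_feed_dict)[0]
model_outputs = sigmoid(logits)  # one row per input item, one score in [0, 1] per label

# Top label per input item; `labels` is the list defined earlier in the README.
for probas in model_outputs:
    top = int(np.argmax(probas))
    print(labels[top], float(probas[top]))

The element-wise sigmoid (rather than a softmax) reflects that go_emotions is a multi-label problem, so each label gets an independent score.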