lannelin committed
Commit 9edb475
Parent: c5c4329

Update README.md

Files changed (1): README.md (+4 -4)
README.md CHANGED
@@ -96,7 +96,7 @@ out = model.generate(**inputs, output_scores=True, return_dict_in_generate=True,
 # sanity check that our sequences are expected length (1 + start token + end token = 3)
 for i, seq in enumerate(out.sequences):
     assert len(
-        seq) == 3, f"generated sequence {i} not of expected length, 3." \
+        seq) == 3, f"generated sequence {i} not of expected length, 3." \\
         f" Actual length: {len(seq)}"
 
 # get the scores for our only token of interest
@@ -108,8 +108,8 @@ scores = out.scores[0]
 # sanity check that these labels are always the top 3 scoring
 for i, sequence_scores in enumerate(scores):
     top_scores = sequence_scores.argsort()[-3:]
-    assert set(top_scores.tolist()) == set(label_inds), \
-        f"top scoring tokens are not expected for this task." \
+    assert set(top_scores.tolist()) == set(label_inds), \\
+        f"top scoring tokens are not expected for this task." \\
         f" Expected: {label_inds}. Got: {top_scores.tolist()}."
 
 # cut down scores to our task labels
@@ -135,7 +135,7 @@ print(entail_vs_contra_probas)
 
 # or we can show probas similar to `ZeroShotClassificationPipeline`
 # this gives a zero-shot classification style output across labels
-entail_scores = scores[:, 0]
+entail_scores = scores[:, entailment_ind]
 entail_probas = softmax(entail_scores, dim=0)
 print(entail_probas)
 # tensor([7.6341e-03, 4.2873e-04, 9.9194e-01])
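The substantive fix is in the last hunk: `scores[:, 0]` hardcoded the first label column, while `scores[:, entailment_ind]` selects the entailment column by its position in the task's label list. A minimal sketch of the difference, using a made-up label order and scores tensor (the README defines its own `label_inds`, `entailment_ind`, and `scores`):

import torch

# hypothetical label order, for illustration only; the README's may differ
labels = ["contradiction", "neutral", "entailment"]
entailment_ind = labels.index("entailment")  # position in the label list, here 2

# stand-in for the README's scores after cutting down to the label columns,
# shape (num_sequences, num_labels)
scores = torch.tensor([[4.1, 0.3, -2.5],
                       [3.8, 0.9, -5.4],
                       [-1.0, 0.2, 2.3]])

entail_scores = scores[:, entailment_ind]  # entailment logit per sequence
# scores[:, 0] would instead have returned the contradiction column here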
 
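The other three changed lines only adjust the continuation backslashes on the multi-line assert messages. An `assert` statement gets no implicit line joining of its own, so a message split across lines needs either a trailing backslash or parentheses. A backslash-free alternative (a sketch, not part of this commit):

sequences = [[0, 42, 1], [0, 7, 1]]  # stand-in for the README's out.sequences

for i, seq in enumerate(sequences):
    # parentheses provide implicit line joining, and adjacent f-strings
    # concatenate, so no continuation backslash is needed
    assert len(seq) == 3, (
        f"generated sequence {i} not of expected length, 3."
        f" Actual length: {len(seq)}"
    )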
 
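For context on the tail of the last hunk: softmax over the entailment scores along dim=0 normalizes across the batch of premise-hypothesis pairs, giving a zero-shot-classification-style distribution over candidate labels, comparable to `ZeroShotClassificationPipeline` with multi_label=False. A toy illustration with assumed logits:

import torch
from torch.nn.functional import softmax

# assumed entailment logits, one per candidate label in the batch
entail_scores = torch.tensor([-2.5, -5.4, 2.3])
entail_probas = softmax(entail_scores, dim=0)
print(entail_probas)  # the highest-scoring label takes nearly all the mass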