lsy641 committed
Commit 6563183 • 1 Parent(s): 00ff4e2
Files changed (5):
  1. README.md +1 -1
  2. __pycache__/distinct.cpython-38.pyc +0 -0
  3. distinct.py +15 -15
  4. requirements.txt +2 -1
  5. tests.py +3 -6
README.md CHANGED
@@ -53,7 +53,7 @@
 - **mode** *(string): 'Expectation-Adjusted-Distinct' or 'Distinct' for diversity calculation. If 'Expectation-Adjusted-Distinct', the scores for both modes will be returned. The default value is 'Expectation-Adjusted-Distinct'.*
 - **vocab_size** *(int): For calculating 'Expectation-Adjusted-Distinct', either vocab_size or dataForVocabCal must not be None. Default value is None.*
 - **dataForVocabCal** *(list of string): Data for calculating the vocab_size for 'Expectation-Adjusted-Distinct'. Typically a list of sentences constituting the task dataset. For calculating 'Expectation-Adjusted-Distinct', either vocab_size or dataForVocabCal must not be None. Default value is None.*
-- **tokenizer** *(string or tokenizer class): tokenizer for splitting sentences into words. Default value is "white_space". An NLTK tokenizer is also available.*
+- **tokenizer** *(string or tokenizer class): tokenizer for splitting sentences into words. Default value is Tokenizer13a(). An NLTK tokenizer is also available.*
 
 ### Output Values
 
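The parameters documented above map directly onto the keyword arguments of _compute in distinct.py. A minimal usage sketch, hedged: the module id "lsy641/distinct" is an assumption inferred from the committer's namespace, and the sentences are borrowed from tests.py.

    import evaluate

    distinct = evaluate.load("lsy641/distinct")  # hypothetical module id

    predictions = ["Hi.", "I'm sorry to hear that", "I don't know"]

    # 'Expectation-Adjusted-Distinct' requires vocab_size or dataForVocabCal;
    # here the predictions themselves stand in for the task dataset.
    results = distinct.compute(
        predictions=predictions,
        dataForVocabCal=predictions,
        mode="Expectation-Adjusted-Distinct",
    )
    print(results)
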
__pycache__/distinct.cpython-38.pyc ADDED
Binary file (6.1 kB)

distinct.py CHANGED
@@ -15,6 +15,8 @@
 
 import evaluate
 import datasets
+from .tokenizer_13a import Tokenizer13a
+from nltk.tokenize import WhitespaceTokenizer  # needed by the "white_space" fallback added in _compute
 
 
 
@@ -113,11 +114,11 @@ class distinct(evaluate.Measurement):
 
     def _download_and_prepare(self, dl_manager):
         """Optional: download external resources useful to compute the scores"""
-        pass
 
-    def _compute(self, predictions, dataForVocabCal=None, vocab_size=None, tokenizer="white_space", mode="Expectation-Adjusted-Distinct"):
-
+    def _compute(self, predictions, dataForVocabCal=None, vocab_size=None, tokenizer=Tokenizer13a(), mode="Expectation-Adjusted-Distinct"):
         from nltk.util import ngrams
+
+
 
         """Returns the scores"""
         if mode == "Expectation-Adjusted-Distinct" and vocab_size is None and dataForVocabCal is None:
@@ -127,6 +128,9 @@ class distinct(evaluate.Measurement):
         elif mode == "Distinct":
             pass
 
+        if tokenizer == "white_space":
+            tokenizer = WhitespaceTokenizer()
+
         if mode == "Expectation-Adjusted-Distinct" and dataForVocabCal is not None:
             if isinstance(dataForVocabCal, list) and len(dataForVocabCal) > 0 and isinstance(dataForVocabCal[0], str):
                 vocab = set()
@@ -145,18 +149,13 @@ class distinct(evaluate.Measurement):
         total_tokens_2grams = []
         total_tokens_3grams = []
 
-        for prediction in predictions:
-            if tokenizer == "white_space":
-                tokens = prediction.split(" ")
-                tokens_2grams = list(ngrams(prediction.split(" "), 2, pad_left=True, left_pad_symbol='<s>'))
-                tokens_3grams = list(ngrams(prediction.split(" "), 3, pad_left=True, left_pad_symbol='<s>'))
-            else:
-                try:
-                    tokens = list(tokenizer.tokenize(prediction))
-                    tokens_2grams = list(ngrams(list(tokenizer.tokenize(prediction)), 2, pad_left=True, left_pad_symbol='<s>'))
-                    tokens_3grams = list(ngrams(list(tokenizer.tokenize(prediction)), 3, pad_left=True, left_pad_symbol='<s>'))
-                except Exception as e:
-                    raise e
+        for prediction in predictions:
+            try:
+                tokens = list(tokenizer.tokenize(prediction))
+                tokens_2grams = list(ngrams(list(tokenizer.tokenize(prediction)), 2, pad_left=True, left_pad_symbol='<s>'))
+                tokens_3grams = list(ngrams(list(tokenizer.tokenize(prediction)), 3, pad_left=True, left_pad_symbol='<s>'))
+            except Exception as e:
+                raise e
 
             distinct_tokens = distinct_tokens | set(tokens)
             distinct_tokens_2grams = distinct_tokens_2grams | set(tokens_2grams)
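Concretely, each Distinct-n score accumulated above is the number of unique n-grams divided by the total number of n-grams across all predictions, with left padding by '<s>'. A minimal standalone sketch of that reduction (illustrative only, not part of the commit; the tokenize argument stands in for whichever tokenizer _compute resolves):

    from nltk.util import ngrams

    def distinct_n(predictions, tokenize, n=2):
        # tokenize: any callable returning a token list, e.g. str.split
        # or Tokenizer13a().tokenize; padding mirrors _compute's ngrams calls.
        seen, total = set(), 0
        for prediction in predictions:
            grams = list(ngrams(tokenize(prediction), n,
                                pad_left=True, left_pad_symbol='<s>'))
            seen |= set(grams)   # distinct n-grams
            total += len(grams)  # all n-grams
        return len(seen) / total if total else 0.0

In 'Expectation-Adjusted-Distinct' mode the distinct count is then normalized by its expected value given the vocabulary size, which is why that mode requires vocab_size or dataForVocabCal.
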
requirements.txt CHANGED
@@ -1 +1,2 @@
-git+https://github.com/huggingface/evaluate@main
+git+https://github.com/huggingface/evaluate@main
+nltk
tests.py CHANGED
@@ -1,17 +1,14 @@
 test_cases = [
     {
-        "predictions": [0, 0],
-        "references": [1, 1],
+        "predictions": ["Hi.", "I'm sorry to hear that", "I don't know"],
         "result": {"metric_score": 0}
     },
     {
-        "predictions": [1, 1],
-        "references": [1, 1],
+        "predictions": ["Hi.", "I'm sorry to hear that", "I don't know"],
         "result": {"metric_score": 1}
     },
     {
-        "predictions": [1, 0],
-        "references": [1, 1],
+        "predictions": ["Hi.", "I'm sorry to hear that", "I don't know"],
         "result": {"metric_score": 0.5}
     }
 ]
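A sketch of a runner for these cases (hedged: the module id is an assumption, and since the expected metric_score values still look like scaffold placeholders, the loop prints rather than asserts):

    import evaluate

    from tests import test_cases  # the list defined above

    distinct = evaluate.load("lsy641/distinct")  # hypothetical module id

    for case in test_cases:
        scores = distinct.compute(
            predictions=case["predictions"],
            dataForVocabCal=case["predictions"],  # reuse predictions as vocab data
        )
        print(scores, "expected:", case["result"])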