PiC committed on
Commit 66d4090
1 Parent(s): a12dc0a

Update phrase_retrieval.py

Files changed (1)
  1. phrase_retrieval.py +19 -10
phrase_retrieval.py CHANGED
@@ -28,14 +28,21 @@ logger = datasets.logging.get_logger(__name__)
 
 
 _CITATION = """\
+@article{pham2022PiC,
+  title={PiC: A Phrase-in-Context Dataset for Phrase Understanding and Semantic Search},
+  author={Pham, Thang M and Yoon, Seunghyun and Bui, Trung and Nguyen, Anh},
+  journal={arXiv preprint arXiv:2207.09068},
+  year={2022}
+}
 """
 
 _DESCRIPTION = """\
+Phrase in Context is a curated benchmark for phrase understanding and semantic search, consisting of three tasks of increasing difficulty: Phrase Similarity (PS), Phrase Retrieval (PR) and Phrase Sense Disambiguation (PSD). The datasets are annotated by 13 linguistic experts on Upwork and verified by two groups: ~1000 AMT crowdworkers and another set of 5 linguistic experts. PiC benchmark is distributed under CC-BY-NC 4.0.
 """
 
-_HOMEPAGE = ""
+_HOMEPAGE = "https://phrase-in-context.github.io/"
 
-_LICENSE = "CC-BY-4.0"
+_LICENSE = "CC-BY-NC-4.0"
 
 _URL = "https://auburn.edu/~tmp0038/PiC/"
 _SPLITS = {
@@ -65,12 +72,12 @@ class PhraseRetrieval(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         PRConfig(
             name=_PR_PASS,
-            version=datasets.Version("1.0.0"),
+            version=datasets.Version("1.0.2"),
             description="The PiC Dataset for Phrase Retrieval at short passage level (~11 sentences)"
         ),
         PRConfig(
            name=_PR_PAGE,
-            version=datasets.Version("1.0.0"),
+            version=datasets.Version("1.0.2"),
            description="The PiC Dataset for Phrase Retrieval at Wiki page level"
         ),
     ]
@@ -83,13 +90,13 @@ class PhraseRetrieval(datasets.GeneratorBasedBuilder):
                     "id": datasets.Value("string"),
                     "title": datasets.Value("string"),
                     "context": datasets.Value("string"),
-                    "query": datasets.Value("string"),
+                    "question": datasets.Value("string"),
                     "answers": datasets.Sequence(
                         {
                             "text": datasets.Value("string"),
                             "answer_start": datasets.Value("int32"),
                         }
-                    ),
+                    )
                 }
             ),
             # No default supervised_keys (as we have to pass both question and context as input).
@@ -105,7 +112,6 @@ class PhraseRetrieval(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-
         urls_to_download = {
             "train": os.path.join(_URL, self.config.name, _SPLITS["train"]),
             "dev": os.path.join(_URL, self.config.name, _SPLITS["dev"]),
@@ -126,20 +132,23 @@ class PhraseRetrieval(datasets.GeneratorBasedBuilder):
         with open(filepath, encoding="utf-8") as f:
             pic_pr = json.load(f)
             for example in pic_pr["data"]:
+                title = example.get("title", "")
+
                 answer_starts = [answer["answer_start"] for answer in example["answers"]]
                 answers = [answer["text"] for answer in example["answers"]]
 
                 # Features currently used are "context", "question", and "answers".
                 # Others are extracted here for the ease of future expansions.
                 yield key, {
-                    "title": example["title"],
+                    "title": title,
                     "context": example["context"],
-                    "query": example["question"],
+                    "question": example["question"],
                     "id": example["id"],
                     "answers": {
                         "answer_start": answer_starts,
                         "text": answers,
-                    },
+                    }
                 }
                 key += 1
 
+
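
For context, a minimal usage sketch of how the updated loader might be consumed, assuming the script is published on the Hugging Face Hub under a repo id like "PiC/phrase_retrieval" and that _PR_PASS resolves to a config name such as "PR-pass" (both assumptions; neither appears in this diff):

# Minimal sketch (not part of the commit). Repo id "PiC/phrase_retrieval" and
# config name "PR-pass" are assumptions, not shown in this diff.
from datasets import load_dataset

dataset = load_dataset("PiC/phrase_retrieval", "PR-pass")

example = dataset["train"][0]
# After this commit the query phrase is exposed under "question" (previously "query"),
# and "title" falls back to "" when an entry has no title field.
print(example["question"])
print(example["title"])
print(example["answers"]["text"], example["answers"]["answer_start"])

The version bump to 1.0.2 reflects this schema change, so previously cached copies built under 1.0.0 would be regenerated rather than reused.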