Giguru Scheuer
committed on
Commit
·
6fce28e
1
Parent(s):
df7957c
Added sample set for testing
Browse files- README.md +6 -0
- test.py +1 -1
- trec-cast-2019-multi-turn.py +21 -7
README.md
CHANGED
@@ -38,6 +38,12 @@ text: str
|
|
38 |
The content of the passage.
|
39 |
```
|
40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
### Topics
|
42 |
You can get the topics as follows:
|
43 |
```python
|
|
|
38 |
The content of the passage.
|
39 |
```
|
40 |
|
41 |
+
#### Sample
|
42 |
+
Instead of using the entire data set, you can also download a sample set:
|
43 |
+
```python
|
44 |
+
collection = load_dataset('trec-cast-2019-multi-turn', 'test_collection_sample')
|
45 |
+
```
|
46 |
+
|
47 |
### Topics
|
48 |
You can get the topics as follows:
|
49 |
```python
|
test.py
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
from datasets import load_dataset
|
2 |
|
3 |
-
collection = load_dataset('trec-cast-2019-multi-turn.py')
|
4 |
collection.items()
|
5 |
|
6 |
qrels = load_dataset('trec-cast-2019-multi-turn.py', 'qrels')
|
|
|
1 |
from datasets import load_dataset
|
2 |
|
3 |
+
collection = load_dataset('trec-cast-2019-multi-turn.py', 'test_collection_sample')
|
4 |
collection.items()
|
5 |
|
6 |
qrels = load_dataset('trec-cast-2019-multi-turn.py', 'qrels')
|
trec-cast-2019-multi-turn.py
CHANGED
@@ -52,7 +52,7 @@ _URLs = {
|
|
52 |
'test_collection': {
|
53 |
'car': "http://trec-car.cs.unh.edu/datareleases/v2.0/paragraphCorpus.v2.0.tar.xz",
|
54 |
'msmarco': 'https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz',
|
55 |
-
}
|
56 |
}
|
57 |
|
58 |
|
@@ -80,6 +80,9 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
|
|
80 |
datasets.BuilderConfig(name="test_collection",
|
81 |
version=VERSION,
|
82 |
description="The test collection will provide the passages of TREC CAR and MSMARCO"),
|
|
|
|
|
|
|
83 |
]
|
84 |
|
85 |
# It's not mandatory to have a default configuration. Just use one if it make sense.
|
@@ -136,7 +139,8 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
|
|
136 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
137 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
138 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
139 |
-
|
|
|
140 |
downloaded_files = dl_manager.download_and_extract(my_urls)
|
141 |
return [
|
142 |
datasets.SplitGenerator(
|
@@ -181,18 +185,28 @@ class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
|
|
181 |
query = queries[idx]
|
182 |
qid = f"{conversation_id}_{str(idx+1)}"
|
183 |
yield qid, ({'query': query, 'history': queries[:idx], 'qid': qid})
|
184 |
-
elif split == 'test_collection':
|
185 |
car_file = file['car'] + "/paragraphCorpus/dedup.articles-paragraphs.cbor"
|
186 |
msmarco_file = file['msmarco']+"/collection.tsv"
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
191 |
with open(msmarco_file) as f:
|
192 |
msmarco = csv.reader(f, delimiter="\t")
|
193 |
for line in msmarco:
|
194 |
docid, text = line
|
195 |
docid = f"MARCO_{docid}"
|
196 |
yield docid, ({"docno": docid, "text": text})
|
|
|
|
|
|
|
197 |
else:
|
198 |
raise NotImplementedError(f"'{split}' is not yet implemented")
|
|
|
52 |
'test_collection': {
|
53 |
'car': "http://trec-car.cs.unh.edu/datareleases/v2.0/paragraphCorpus.v2.0.tar.xz",
|
54 |
'msmarco': 'https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz',
|
55 |
+
},
|
56 |
}
|
57 |
|
58 |
|
|
|
80 |
datasets.BuilderConfig(name="test_collection",
|
81 |
version=VERSION,
|
82 |
description="The test collection will provide the passages of TREC CAR and MSMARCO"),
|
83 |
+
datasets.BuilderConfig(name="test_collection_sample",
|
84 |
+
version=VERSION,
|
85 |
+
description="A small sample of 20000 of the test collection passages."),
|
86 |
]
|
87 |
|
88 |
# It's not mandatory to have a default configuration. Just use one if it make sense.
|
|
|
139 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
|
140 |
# It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
|
141 |
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
|
142 |
+
urlkey = 'test_collection' if self.config.name == 'test_collection_sample' else self.config.name
|
143 |
+
my_urls = _URLs[urlkey]
|
144 |
downloaded_files = dl_manager.download_and_extract(my_urls)
|
145 |
return [
|
146 |
datasets.SplitGenerator(
|
|
|
185 |
query = queries[idx]
|
186 |
qid = f"{conversation_id}_{str(idx+1)}"
|
187 |
yield qid, ({'query': query, 'history': queries[:idx], 'qid': qid})
|
188 |
+
elif split == 'test_collection' or split == 'test_collection_sample':
|
189 |
car_file = file['car'] + "/paragraphCorpus/dedup.articles-paragraphs.cbor"
|
190 |
msmarco_file = file['msmarco']+"/collection.tsv"
|
191 |
+
is_sample = split == 'test_collection_sample'
|
192 |
+
i = 0
|
193 |
+
with open(car_file, 'rb') as f:
|
194 |
+
for para in read_data.iter_paragraphs(f):
|
195 |
+
docid = f"CAR_{para.para_id}"
|
196 |
+
yield docid, ({"docno": docid, "text": para.get_text()})
|
197 |
+
i += 1
|
198 |
+
if is_sample and i >= 10000:
|
199 |
+
break
|
200 |
+
|
201 |
+
i = 0
|
202 |
with open(msmarco_file) as f:
|
203 |
msmarco = csv.reader(f, delimiter="\t")
|
204 |
for line in msmarco:
|
205 |
docid, text = line
|
206 |
docid = f"MARCO_{docid}"
|
207 |
yield docid, ({"docno": docid, "text": text})
|
208 |
+
i += 1
|
209 |
+
if is_sample and i >= 10000:
|
210 |
+
break
|
211 |
else:
|
212 |
raise NotImplementedError(f"'{split}' is not yet implemented")
|