Mofe committed
Commit fc0e263
1 Parent(s): 8aee123

Update config

Files changed (2)
  1. README.md +5 -1
  2. ciral.py +39 -14
README.md CHANGED
@@ -39,12 +39,16 @@ This dataset repo contains only the queries and relevance judgements. The corpus
 ```
 ciral_dataset = load_dataset("ciral/ciral", "hausa") #or swahili, somali, yoruba
 
-for data in ciral_data['train']: # or 'test'
+for data in ciral_data['dev']: # or 'testA' or 'testB'
     query_id = data['query_id']
     query = data['query']
     pos_qrels = data['positive_passages']
     neg_qrels = data['negative_passages']
 
+    # To load test set A's pool judgments
+    pools_pos_qrels = data['pools_positive_passages']
+    pools_neg_qrels = data['pools_negative_passages']
+
     for qrel in pos_qrels:
         docid = qrel['docid']
         text = qrel['text']
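For reference, a runnable sketch of the updated usage pattern, assuming the split and field names introduced in this commit (the README refers to 'testA'/'testB', while the loading script registers the splits as 'test-a'/'test-b') and that `trust_remote_code=True` is required on recent `datasets` releases for script-based datasets:

```
from datasets import load_dataset

# Config names per the README: hausa, swahili, somali, yoruba.
ciral_dataset = load_dataset("CIRAL/ciral", "hausa", trust_remote_code=True)

for data in ciral_dataset["dev"]:  # test splits: 'testA'/'testB' per the README, 'test-a'/'test-b' per the script
    query_id = data["query_id"]
    query = data["query"]
    pos_qrels = data["positive_passages"]
    neg_qrels = data["negative_passages"]

    # Pool judgements (populated for test set A, empty lists otherwise)
    pools_pos_qrels = data["pools_positive_passages"]
    pools_neg_qrels = data["pools_negative_passages"]

    for qrel in pos_qrels:
        docid = qrel["docid"]
        text = qrel["text"]
```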
ciral.py CHANGED
@@ -40,12 +40,18 @@ _LICENSE = ""
 
 _URLS = {
     lang: {
-        'train': [
-            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-train.tsv',
-            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-v1.0-{lang_code}-train.tsv'
+        'dev': [
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-dev.tsv',
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-v1.0-{lang_code}-dev.tsv'
         ],
-        'test':[
-            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test.tsv'
+        'testA':[
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-a.tsv',
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-a.tsv',
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-a-pools.tsv',
+        ],
+        'testB':[
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-b.tsv',
+            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-b.tsv',
         ]
     } for lang, lang_code in languages.items()
 }
@@ -124,15 +130,21 @@ class CIRAL(datasets.GeneratorBasedBuilder):
         downloaded_files = dl_manager.download_and_extract(_URLS[lang])
         return [
             datasets.SplitGenerator(
-                name='train',
+                name='dev',
+                gen_kwargs={
+                    'filepaths': downloaded_files['dev'],
+                },
+            ),
+            datasets.SplitGenerator(
+                name='test-a',
                 gen_kwargs={
-                    'filepaths': downloaded_files['train'],
+                    'filepaths': downloaded_files['test-a'],
                 },
             ),
             datasets.SplitGenerator(
-                name='test',
+                name='test-b',
                 gen_kwargs={
-                    'filepaths': downloaded_files['test'],
+                    'filepaths': downloaded_files['test-b'],
                 },
             ),
         ]
@@ -140,16 +152,20 @@ class CIRAL(datasets.GeneratorBasedBuilder):
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepaths):
         lang = self.config.name
-        corpus = datasets.load_dataset('ciral/ciral-corpus', lang)['train']
+        corpus = datasets.load_dataset('ciral/ciral-corpus', lang)['dev']
         docid2doc = {doc['docid']: doc['text'] for doc in corpus}
 
-        query_file, qrel_file = (filepaths) if len(filepaths) == 2 else (filepaths[0], None)
+        query_file, qrel_file, pools_file = (filepaths) if len(filepaths) == 3 else (filepaths[0], filepaths[1], None)
         queries = load_queries(query_file)
-        qrels = load_qrels(qrel_file)
+        shallow_qrels = load_qrels(qrel_file)
+        pools_qrels = load_qrels(pools_file)
+
         for query_id in queries:
+            positive_docids = [docid for docid, judgement in shallow_qrels[query_id].items() if judgement==1]
+            negative_docids = [docid for docid, judgement in shallow_qrels[query_id].items() if judgement==0]
 
-            positive_docids = [docid for docid, judgement in qrels[query_id].items() if judgement==1] if qrels is not None else []
-            negative_docids = [docid for docid, judgement in qrels[query_id].items() if judgement==0] if qrels is not None else []
+            pools_positive_docids = [docid for docid, judgement in pools_qrels[query_id].items() if judgement==1] if pools_qrels is not None else []
+            pools_negative_docids = [docid for docid, judgement in pools_qrels[query_id].items() if judgement==0] if pools_qrels is not None else []
 
             data = {}
             data['query_id'] = query_id
@@ -163,4 +179,13 @@ class CIRAL(datasets.GeneratorBasedBuilder):
                 'text': docid2doc[docid]
             } for docid in negative_docids if docid in docid2doc]
 
+            data['pools_positive_passages'] = [{
+                'docid': docid,
+                'text': docid2doc[docid]
+            } for docid in pools_positive_docids if docid in docid2doc]
+            data['pools_negative_passages'] = [{
+                'docid': docid,
+                'text': docid2doc[docid]
+            } for docid in pools_negative_docids if docid in docid2doc]
+
             yield query_id, data
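The generator above calls load_queries and load_qrels, which are defined elsewhere in ciral.py and not shown in this diff. A minimal sketch of what they plausibly do, inferred from how _generate_examples uses their return values (a query_id-to-text dict, and a query_id-to-{docid: judgement} dict that is None when no qrels file is given); the exact TSV column layout is an assumption:

```
import csv

def load_queries(query_file):
    # Hypothetical topics TSV layout: query_id <tab> query text.
    queries = {}
    with open(query_file, encoding='utf-8') as f:
        for query_id, query in csv.reader(f, delimiter='\t'):
            queries[query_id] = query
    return queries

def load_qrels(qrel_file):
    # None is passed for splits without judgements; _generate_examples guards
    # with `if pools_qrels is not None`, so mirror that here.
    if qrel_file is None:
        return None
    qrels = {}
    with open(qrel_file, encoding='utf-8') as f:
        # Assumed TREC-style columns: query_id, iteration, docid, judgement.
        for query_id, _, docid, judgement in csv.reader(f, delimiter='\t'):
            qrels.setdefault(query_id, {})[docid] = int(judgement)
    return qrels
```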