orionweller commited on
Commit
0a8b37d
1 Parent(s): 50dea48

annotation ready

Browse files
analysis.py DELETED
@@ -1,140 +0,0 @@
1
- import pandas as pd
2
- import numpy as np
3
- import os
4
- import torch
5
- from transformers import pipeline
6
- import streamlit as st
7
-
8
- import plotly.express as px
9
- import plotly.figure_factory as ff
10
-
11
- from captum.attr import LayerIntegratedGradients, TokenReferenceBase, visualization
12
- from captum.attr import visualization as viz
13
- from captum import attr
14
- from captum.attr._utils.visualization import format_word_importances, format_special_tokens, _get_color
15
-
16
-
17
- os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
18
-
19
-
20
- def results_to_df(results: dict, metric_name: str):
21
- metric_scores = []
22
- for topic, results_dict in results.items():
23
- for metric_name_cur, metric_value in results_dict.items():
24
- if metric_name == metric_name_cur:
25
- metric_scores.append(metric_value)
26
- return pd.DataFrame({metric_name: metric_scores})
27
-
28
-
29
- def create_boxplot_1df(results: dict, metric_name: str):
30
- df = results_to_df(results, metric_name)
31
- fig = px.box(df, y=metric_name)
32
- return fig
33
-
34
-
35
- def create_boxplot_2df(results1, results2, metric_name):
36
- df1 = results_to_df(results1, metric_name)
37
- df2 = results_to_df(results2, metric_name)
38
- df2["Run"] = "Run 2"
39
- df1["Run"] = "Run 1"
40
- df = pd.concat([df1, df2])
41
-
42
- # Create distplot with custom bin_size
43
- fig = px.histogram(df, x=metric_name, color="Run", marginal="box", hover_data=df.columns)
44
- return fig
45
-
46
-
47
- def create_boxplot_diff(results1, results2, metric_name):
48
- df1 = results_to_df(results1, metric_name)
49
- df2 = results_to_df(results2, metric_name)
50
- diff = df1[metric_name] - df2[metric_name]
51
-
52
- x_axis = f"Difference in {metric_name} from 1 to 2"
53
- fig = px.histogram(pd.DataFrame({x_axis: diff}), x=x_axis, marginal="box")
54
- return fig
55
-
56
-
57
- def summarize_attributions(attributions):
58
- attributions = attributions.sum(dim=-1).squeeze(0)
59
- attributions = attributions / torch.norm(attributions)
60
- return attributions
61
-
62
-
63
- def get_words(words, importances):
64
- words_colored = []
65
- for word, importance in zip(words, importances[: len(words)]):
66
- word = format_special_tokens(word)
67
- color = _get_color(importance)
68
- unwrapped_tag = '<span style="background-color: {color}; opacity:1.0; line-height:1.75">{word}</span>'.format(
69
- color=color, word=word
70
- )
71
- words_colored.append(unwrapped_tag)
72
- return words_colored
73
-
74
- @st.cache_resource
75
- def get_model(model_name: str):
76
- if "MonoT5" in model_name:
77
- if model_name == "MonoT5-Small":
78
- pipe = pipeline('text2text-generation',
79
- model='castorini/monot5-small-msmarco-10k',
80
- tokenizer='castorini/monot5-small-msmarco-10k',
81
- device='cpu')
82
- elif model_name == "MonoT5-3B":
83
- pipe = pipeline('text2text-generation',
84
- model='castorini/monot5-3b-msmarco-10k',
85
- tokenizer='castorini/monot5-3b-msmarco-10k',
86
- device='cpu')
87
- def formatter(query, doc):
88
- return f"Query: {query} Document: {doc} Relevant:"
89
-
90
-
91
- return pipe, formatter
92
-
93
- def prep_func(pipe, formatter):
94
- # variables that only need to be run once
95
- decoder_input_ids = pipe.tokenizer(["<pad>"], return_tensors="pt", add_special_tokens=False, truncation=True).input_ids.to('cpu')
96
- decoder_embedding_layer = pipe.model.base_model.decoder.embed_tokens
97
- decoder_inputs_emb = decoder_embedding_layer(decoder_input_ids)
98
-
99
- token_false_id = pipe.tokenizer.get_vocab()['▁false']
100
- token_true_id = pipe.tokenizer.get_vocab()["▁true"]
101
-
102
- # this function needs to be run for each combination
103
- @st.cache_data
104
- def get_saliency(query, doc):
105
- input_ids = pipe.tokenizer(
106
- [formatter(query, doc)],
107
- padding=False,
108
- truncation=True,
109
- return_tensors="pt",
110
- max_length=pipe.tokenizer.model_max_length,
111
- )["input_ids"].to('cpu')
112
-
113
- embedding_layer = pipe.model.base_model.encoder.embed_tokens
114
- inputs_emb = embedding_layer(input_ids)
115
-
116
- def forward_from_embeddings(inputs_embeds, decoder_inputs_embeds):
117
- logits = pipe.model.forward(inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds)['logits'][:, -1, :]
118
- batch_scores = logits[:, [token_false_id, token_true_id]]
119
- batch_scores = torch.nn.functional.log_softmax(batch_scores, dim=1)
120
- scores = batch_scores[:, 1].exp() # relevant token
121
- return scores
122
-
123
- lig = attr.Saliency(forward_from_embeddings)
124
- attributions_ig, delta = lig.attribute(
125
- inputs=(inputs_emb, decoder_inputs_emb)
126
- )
127
- attributions_normed = summarize_attributions(attributions_ig)
128
- return "\n".join(get_words(pipe.tokenizer.convert_ids_to_tokens(input_ids.squeeze(0).tolist()), attributions_normed))
129
-
130
- return get_saliency
131
-
132
-
133
- if __name__ == "__main__":
134
- query = "how to add dll to visual studio?"
135
- doc = "StackOverflow In the days of 16-bit Windows, a WPARAM was a 16-bit word, while LPARAM was a 32-bit long. These distinctions went away in Win32; they both became 32-bit values. ... WPARAM is defined as UINT_PTR , which in 64-bit Windows is an unsigned, 64-bit value."
136
- model, formatter = get_model("MonoT5")
137
- get_saliency = prep_func(model, formatter)
138
- print(get_saliency(query, doc))
139
-
140
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -1,46 +1,58 @@
1
  import streamlit as st
2
  import os
3
  import pathlib
4
- import beir
5
- from beir import util
6
- from beir.datasets.data_loader import GenericDataLoader
7
- import pytrec_eval
8
  import pandas as pd
9
  from collections import defaultdict
10
  import json
11
  import copy
12
  import plotly.express as px
13
 
14
- from constants import ALL_DATASETS, ALL_METRICS
15
- from dataset_loading import get_dataset, load_run, load_local_qrels, load_local_corpus, load_local_queries
16
- from analysis import create_boxplot_1df, create_boxplot_2df, create_boxplot_diff, get_model, prep_func
17
 
18
 
19
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
20
  st.set_page_config(layout="wide")
21
 
 
 
22
 
23
- if 'cur_instance_num' not in st.session_state:
24
- st.session_state.cur_instance_num = -1
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
 
27
- def update_details(run_details, run_score):
28
- if run_score == 0:
29
- run_details["none"] += 1
30
- elif run_score == 1:
31
- run_details["perfect"] += 1
32
- else:
33
- run_details["inbetween"] += 1
34
- return run_details
 
 
 
35
 
 
 
36
 
37
- def check_valid_args(run1_file, run2_file, dataset_name, qrels, queries, corpus):
38
- if run1_file is not None and dataset_name not in ["", None, "custom"]:
39
- return True
40
- elif run1_file is not None and dataset_name == "custom":
41
- if qrels is not None and queries is not None and corpus is not None:
42
- return True
43
- return False
44
 
45
 
46
  def validate(config_option, file_loaded):
@@ -49,196 +61,54 @@ def validate(config_option, file_loaded):
49
  st.stop()
50
 
51
 
52
- def combine(text_og, text_new, combine_type):
53
- if combine_type == "None":
54
- return text_og
55
- elif combine_type == "Append":
56
- return text_og + " <APPEND> " + text_new
57
- elif combine_type == "Prepend":
58
- return text_new + " <PREPEND> " + text_og
59
- elif combine_type == "Replace":
60
- return text_new
61
- else:
62
- raise ValueError("Invalid combine type")
63
-
64
  with st.sidebar:
65
  st.title("Options")
66
- dataset_name = st.selectbox("Select a preloaded dataset or upload your own (note: some datasets are large/slow)", tuple(ALL_DATASETS))
67
- if st.checkbox("Choose fields (applies to IR_Datasets only)"):
68
- input_fields_doc = st.text_input("Type the name of the doc fields to get, with commas (blank=all)")
69
- if input_fields_doc in ["", None]:
70
- input_fields_doc = None
71
- input_fields_query = st.sidebar.text_input("Type the name of the query fields to get, with commas (blank=all)")
72
- if input_fields_query in ["", None]:
73
- input_fields_query = None
74
- else:
75
- input_fields_doc = None
76
- input_fields_query = None
77
-
78
- metric_name = st.selectbox("Select a metric", tuple(ALL_METRICS))
79
-
80
- if dataset_name == "custom":
81
- st.header("Upload corpus")
82
- corpus_file = st.file_uploader("Choose a file", key="corpus")
83
- corpus = load_local_corpus(corpus_file)
84
- st.header("Upload queries")
85
- queries_file = st.file_uploader("Choose a file", key="queries")
86
- queries = load_local_queries(queries_file)
87
- st.header("Upload qrels")
88
- qrels_file = st.file_uploader("Choose a file", key="qrels")
89
- qrels = load_local_qrels(qrels_file)
90
- else:
91
- qrels = None
92
- queries = None
93
- corpus = None
94
-
95
-
96
- x = st.header('Upload a run file')
97
- run1_file = st.file_uploader("Choose a file", key="run1")
98
- y = st.header("Upload a second run file")
99
- run2_file = st.file_uploader("Choose a file", key="run2")
100
 
101
  z = st.header("Analysis Options")
102
  # sliderbar of how many Top N to choose
103
- top_n = st.slider("Top N Ranked Docs", 1, 100, 3)
104
- n_relevant_docs = st.slider("Number of relevant docs", 1, 100, 3)
105
- incorrect_only = st.checkbox("Show only incorrect instances", value=False)
106
- one_better_than_two = st.checkbox("Show only instances where run 1 is better than run 2", value=False)
107
- two_better_than_one = st.checkbox("Show only instances where run 2 is better than run 1", value=False)
108
- use_model_saliency = st.checkbox("Use model saliency (slow!)", value=False)
109
- if use_model_saliency:
110
- # choose from a list of models
111
- model_name = st.selectbox("Choose from a list of models", ["MonoT5-Small", "MonoT5-3B"])
112
- model, formatter = get_model(model_name)
113
- get_saliency = prep_func(model, formatter)
114
-
115
-
116
- advanced_options1 = st.checkbox("Show advanced options for Run 1", value=False)
117
- doc_expansion1 = doc_expansion2 = None
118
- query_expansion1 = query_expansion2 = None
119
- run1_uses_query_expansion = "None"
120
- run1_uses_doc_expansion = "None"
121
- run2_uses_query_expansion = "None"
122
- run2_uses_doc_expansion = "None"
123
- if advanced_options1:
124
- doc_header = st.header("Upload a Document Expansion file")
125
- doc_expansion_file = st.file_uploader("Choose a file", key="doc_expansion")
126
- if doc_expansion_file is not None:
127
- doc_expansion1 = load_local_corpus(doc_expansion_file)
128
- query_header = st.header("Upload a Query Expansion file")
129
- query_expansion_file = st.file_uploader("Choose a file", key="query_expansion")
130
- if query_expansion_file is not None:
131
- query_expansion1 = load_local_queries(query_expansion_file)
132
-
133
- run1_uses_query_expansion = st.selectbox("Type of query expansion used in run 1", ("None", "Append", "Prepend", "Replace"))
134
- run1_uses_doc_expansion = st.selectbox("Type of document expansion used in run 1", ("None", "Append", "Prepend", "Replace"))
135
- validate(run1_uses_query_expansion, query_expansion_file)
136
- validate(run1_uses_doc_expansion, doc_expansion_file)
137
-
138
- advanced_options2 = st.checkbox("Show advanced options for Run 2", value=False)
139
- if advanced_options2:
140
- doc_header = st.header("Upload a Document Expansion file")
141
- doc_expansion_file = st.file_uploader("Choose a file", key="doc_expansion2")
142
- if doc_expansion_file is not None:
143
- doc_expansion2 = load_local_corpus(doc_expansion_file)
144
- query_header = st.header("Upload a Query Expansion file")
145
- query_expansion_file = st.file_uploader("Choose a file", key="query_expansion2")
146
- if query_expansion_file is not None:
147
- query_expansion2 = load_local_queries(query_expansion_file)
148
-
149
- run2_uses_query_expansion = st.selectbox("Type of query expansion used in run 2", ("None", "Append", "Prepend", "Replace"))
150
- run2_uses_doc_expansion = st.selectbox("Type of document expansion used in run 2", ("None", "Append", "Prepend", "Replace"))
151
- validate(run2_uses_query_expansion, query_expansion_file)
152
- validate(run2_uses_doc_expansion, doc_expansion_file)
153
-
154
-
155
- # everything hinges on the run being uploaded, so do that first
156
- # init_title = st.title("Upload Run and Choose Details")
157
-
158
- if run1_file is not None:
159
- run1, run1_pandas = load_run(run1_file)
160
-
161
- # do everything, now that we have the run file
162
- if check_valid_args(run1_file, run2_file, dataset_name, qrels, queries, corpus):
163
- # init_title = st.title("Analysis")
164
- # don't load these til a run is given
165
- if dataset_name != "custom":
166
- corpus, queries, qrels = get_dataset(dataset_name, input_fields_doc, input_fields_query)
167
-
168
- evaluator = pytrec_eval.RelevanceEvaluator(
169
- copy.deepcopy(qrels), pytrec_eval.supported_measures)
170
- results1 = evaluator.evaluate(run1) # dict of instance then metrics then values
171
- average_run1_score = pytrec_eval.compute_aggregated_measure(metric_name, [query_measures[metric_name] for query_measures in results1.values()])
172
- if len(results1) == 0:
173
- # alert and stop
174
- st.error("Run file is empty")
175
- st.stop()
176
 
177
- if run2_file is not None:
178
- run2, run2_pandas = load_run(run2_file)
179
- # NOTE: will fail if run1 is not uploaded
180
- evaluator2 = pytrec_eval.RelevanceEvaluator(
181
- copy.deepcopy(qrels), pytrec_eval.supported_measures)
182
- results2 = evaluator2.evaluate(run2)
183
- average_run2_score = pytrec_eval.compute_aggregated_measure(metric_name, [query_measures[metric_name] for query_measures in results2.values()])
184
 
 
185
 
186
- col1, col2 = st.columns([1, 3], gap="large")
 
 
187
 
188
- # incorrect = 0
189
- is_better_run1_count = 0
190
- is_better_run2_count = 0
191
- is_same_count = 0
192
- run1_details = {"none": 0, "perfect": 0, "inbetween": 0}
193
- run2_details = {"none": 0, "perfect": 0, "inbetween": 0}
194
  with col1:
 
 
 
 
 
195
  st.title("Instances")
196
- if run1_file is not None:
197
- set_of_cols = set(run1_pandas.qid.tolist())
198
- container_for_nav = st.container()
199
- name_of_columns = sorted([item for item in set_of_cols])
200
- instances_to_use = []
201
- # st.divider()
202
- for idx in range(len(name_of_columns)):
203
- is_incorrect = False
204
- is_better_run1 = False
205
- is_better_run2 = False
206
-
207
- run1_score = results1[str(name_of_columns[idx])][metric_name] if idx else 1
208
- run1_details = update_details(run1_details, run1_score)
209
- if run2_file is not None:
210
- run2_score = results2[str(name_of_columns[idx])][metric_name] if idx else 1
211
- run2_details = update_details(run2_details, run2_score)
212
-
213
- if run1_score == 0 or run2_score == 0:
214
- is_incorrect = True
215
-
216
- if run1_score > run2_score:
217
- is_better_run1_count += 1
218
- is_better_run1 = True
219
- elif run2_score > run1_score:
220
- is_better_run2_count += 1
221
- is_better_run2 = True
222
- else:
223
- is_same_count += 1
224
-
225
-
226
- if not incorrect_only or is_incorrect:
227
- if not one_better_than_two or is_better_run1:
228
- if not two_better_than_one or is_better_run2:
229
- # check = st.checkbox(f"{idx}. " + str(name_of_columns[idx]), key=f"{idx}check")
230
- # st.divider()
231
- instances_to_use.append(name_of_columns[idx])
232
- else:
233
- if run1_score == 0:
234
- is_incorrect = True
235
-
236
- if not incorrect_only or is_incorrect:
237
- # check = st.checkbox(f"{idx}. " + str(name_of_columns[idx]), key=f"{idx}check")
238
- # st.divider()
239
- instances_to_use.append(name_of_columns[idx])
240
-
241
-
242
  def sync_from_drop():
243
  if st.session_state.selectbox_instance == "Overview":
244
  st.session_state.number_of_col = -1
@@ -261,378 +131,62 @@ if check_valid_args(run1_file, run2_file, dataset_name, qrels, queries, corpus):
261
  number_of_col = container_for_nav.number_input(min_value=-1, step=1, max_value=len(instances_to_use) - 1, on_change=sync_from_number, label=f"Select instance by index (up to **{len(instances_to_use) - 1}**)", key="number_of_col")
262
  selectbox_instance = container_for_nav.selectbox("Select instance by ID", ["Overview"] + name_of_columns, on_change=sync_from_drop, key="selectbox_instance")
263
  st.divider()
264
- # make pie plot showing incorrect vs correct
265
- st.header("Breakdown")
266
-
267
-
268
- if run2_file is None:
269
- overall_scores_container = st.container()
270
- left_score, right_score = overall_scores_container.columns([1, 1])
271
- left_score.metric(label=f"Run 1 {metric_name}", value=round(average_run1_score, 3))
272
- right_score.metric(label="#Q", value=len(results1))
273
-
274
- plotly_pie_chart = px.pie(names=["Perfect", "Inbetween", "None"], values=[run1_details["perfect"], run1_details["inbetween"], run1_details["none"]])
275
- st.write("Run 1 Scores")
276
- plotly_pie_chart.update_traces(showlegend=False, selector=dict(type='pie'), textposition='inside', textinfo='percent+label')
277
- st.plotly_chart(plotly_pie_chart, use_container_width=True)
278
- else:
279
- overall_scores_container = st.container()
280
- left_score, right_score = overall_scores_container.columns([1, 1])
281
- left_score.metric(label=f"Run 1 {metric_name}", value=round(average_run1_score, 3))
282
- right_score.metric(label=f"Run 2 {metric_name}", value=round(average_run2_score, 3))
283
-
284
- if st.checkbox("Show Run 1 vs Run 2", value=True):
285
- plotly_pie_chart = px.pie(names=["Run 1 Better", "Run 2 Better", "Tied"], values=[is_better_run1_count, is_better_run2_count, is_same_count])
286
- plotly_pie_chart.update_traces(showlegend=False, selector=dict(type='pie'), textposition='inside', textinfo='percent+label')
287
- st.plotly_chart(plotly_pie_chart, use_container_width=True)
288
-
289
- if st.checkbox("Show Run 1 Breakdown"):
290
- plotly_pie_chart_run1 = px.pie(names=["Perfect", "Inbetween", "None"], values=[run1_details["perfect"], run1_details["inbetween"], run1_details["none"]])
291
- plotly_pie_chart_run1.update_traces(showlegend=False, selector=dict(type='pie'), textposition='inside', textinfo='percent+label')
292
- st.plotly_chart(plotly_pie_chart_run1, use_container_width=True)
293
- if st.checkbox("Show Run 2 Breakdown"):
294
- plotly_pie_chart_run2 = px.pie(names=["Perfect", "Inbetween", "None"], values=[run2_details["perfect"], run2_details["inbetween"], run2_details["none"]])
295
- plotly_pie_chart_run2.update_traces(showlegend=False, selector=dict(type='pie'), textposition='inside', textinfo='percent+label')
296
- st.plotly_chart(plotly_pie_chart_run2, use_container_width=True)
297
-
298
 
299
 
300
  with col2:
301
- # st.title(f"Information ({len(checkboxes) - 1}/{len(name_of_columns) - 1})")
302
- ### Only one run file
303
- if run1_file is not None and run2_file is None:
304
-
305
- # get instance number
306
- inst_index = number_of_col
307
-
308
- if inst_index >= 0:
309
- inst_num = instances_to_use[inst_index - 1]
310
-
311
- st.markdown("<h1 style='text-align: center; color: black;text-decoration: underline;'>Run 1</h1>", unsafe_allow_html=True)
312
-
313
- container = st.container()
314
-
315
- rank_col, score_col, id_col = container.columns([2,1,3])
316
- id_col.metric("ID", inst_num)
317
- score_col.metric(metric_name, results1[str(inst_num)][metric_name])
318
-
319
- # st.subheader(f"ID")
320
- # st.markdown(inst_num)
321
- st.divider()
322
-
323
- st.subheader(f"Query")
324
- if run1_uses_query_expansion != "None":
325
- show_orig_rel = st.checkbox("Show Original Query", key=f"{inst_index}reloriguery", value=False)
326
-
327
- query_text_og = queries[str(inst_num)]
328
- if query_expansion1 is not None and run1_uses_query_expansion != "None" and not show_orig_rel:
329
- alt_text = query_expansion1[str(inst_num)]
330
- query_text = combine(query_text_og, alt_text, run1_uses_query_expansion)
331
- else:
332
- query_text = query_text_og
333
- st.markdown(query_text)
334
- st.divider()
335
-
336
- ## Documents
337
- # relevant
338
- relevant_docs = list(qrels[str(inst_num)].keys())[:n_relevant_docs]
339
- doc_texts = [(doc_id, corpus[doc_id]["title"] if "title" in corpus[doc_id] else "", corpus[doc_id]["text"]) for doc_id in relevant_docs]
340
- st.subheader("Relevant Documents")
341
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None":
342
- show_orig_rel = st.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig", value=False)
343
-
344
- for (docid, title, text) in doc_texts:
345
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None" and not show_orig_rel:
346
- alt_text = doc_expansion1[docid]["text"]
347
- text = combine(text, alt_text, run1_uses_doc_expansion)
348
-
349
- if use_model_saliency:
350
- if st.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency", value=False):
351
- st.markdown(get_saliency(query_text, doc_texts),unsafe_allow_html=True)
352
- else:
353
- st.text_area(f"{docid}:", text)
354
-
355
- else:
356
- st.text_area(f"{docid}:", text)
357
-
358
-
359
- # go through each of the relevant documents
360
- ranks = []
361
- for docid in relevant_docs:
362
- pred_doc = run1_pandas[run1_pandas.doc_id.isin([docid])]
363
- rank_pred = pred_doc[pred_doc.qid == str(inst_num)]
364
- if rank_pred.empty:
365
- ranks.append("-")
366
- else:
367
- ranks.append(rank_pred.iloc[0]["rank"])
368
- # st.subheader("Ranked of Documents")
369
- # st.markdown(f"Rank: {rank_pred}")
370
- ranking_str = ",".join([str(item) for item in ranks])
371
- if ranking_str == "":
372
- ranking_str = "-"
373
- rank_col.metric(f"Rank of Relevant Doc(s)", ranking_str)
374
- # breakpoint()
375
-
376
-
377
- st.divider()
378
-
379
- # top ranked
380
-
381
- if st.checkbox('Show top ranked documents', key=f"{inst_index}top-1run"):
382
- st.subheader("Top N Ranked Documents")
383
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None":
384
- show_orig_rel_ranked = st.checkbox("Show Original Ranked Doc(s)", key=f"{inst_index}relorigdocs", value=False)
385
-
386
- run1_top_n = run1_pandas[run1_pandas.qid == str(inst_num)][:top_n]
387
- run1_top_n_docs = [corpus[str(doc_id)] for doc_id in run1_top_n.doc_id.tolist()]
388
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None" and not show_orig_rel_ranked:
389
- run1_top_n_docs_alt = [doc_expansion1[str(doc_id)] for doc_id in run1_top_n.doc_id.tolist()]
390
- for d_idx, doc in enumerate(run1_top_n_docs):
391
- alt_text = run1_top_n_docs_alt[d_idx]["text"]
392
- doc_text = combine(doc["text"], alt_text, run1_uses_doc_expansion)
393
- if use_model_saliency:
394
- if st.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency", value=False):
395
- st.markdown(get_saliency(query_text, doc_text),unsafe_allow_html=True)
396
- else:
397
- st.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc_text, key=f"{inst_num}doc{d_idx}")
398
- else:
399
- st.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc_text, key=f"{inst_num}doc{d_idx}")
400
- else:
401
- for d_idx, doc in enumerate(run1_top_n_docs):
402
- if use_model_saliency:
403
- if st.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{d_idx}ranked", value=False):
404
- st.markdown(get_saliency(query_text, doc),unsafe_allow_html=True)
405
- else:
406
- st.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc["text"], key=f"{inst_num}doc{d_idx}")
407
- else:
408
- st.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc["text"], key=f"{inst_num}doc{d_idx}")
409
- st.divider()
410
-
411
- # none checked
412
- elif inst_index < 0:
413
- st.title("Overview")
414
- st.subheader(f"Scores of {metric_name}")
415
- plotly_chart = create_boxplot_1df(results1, metric_name)
416
- st.plotly_chart(plotly_chart)
417
-
418
- ## Both run files available
419
- elif run1_file is not None and run2_file is not None:
420
- has_check = False
421
- container_top = st.container()
422
-
423
- # get instance number
424
- inst_index = number_of_col
425
-
426
- if inst_index >= 0:
427
- inst_num = instances_to_use[inst_index]
428
-
429
- col_run1, col_run2 = container_top.columns([1,1])
430
- col_run1.markdown("<h1 style='text-align: center; color: black;text-decoration: underline;'>Run 1</h1>", unsafe_allow_html=True)
431
- col_run2.markdown("<h1 style='text-align: center; color: black;text-decoration: underline;'>Run 2</h1>", unsafe_allow_html=True)
432
-
433
- container_overview = st.container()
434
- rank_col1, score_col1, rank_col2, score_col2 = container_overview.columns([2,1,2,1])
435
- # id_col1.metric("", "")
436
- score_col1.metric("Run 1 " + metric_name, results1[str(inst_num)][metric_name])
437
- score_col2.metric("Run 2 " + metric_name, results2[str(inst_num)][metric_name])
438
-
439
- st.divider()
440
-
441
- st.subheader(f"Query")
442
- container_two_query = st.container()
443
- col_run1, col_run2 = container_two_query.columns(2, gap="medium")
444
-
445
- query_text_og = queries[str(inst_num)]
446
- if run1_uses_query_expansion != "None" and run2_uses_query_expansion != "None":
447
- alt_text1 = query_expansion1[str(inst_num)]
448
- alt_text2 = query_expansion2[str(inst_num)]
449
- combined_text1 = combine(query_text_og, alt_text1, run1_uses_query_expansion)
450
- combined_text2 = combine(query_text_og, alt_text2, run2_uses_query_expansion)
451
- col_run1.markdown(combined_text1)
452
- col_run2.markdown(combined_text2)
453
- query_text1 = combined_text1
454
- query_text2 = combined_text2
455
- elif run1_uses_query_expansion != "None":
456
- alt_text = query_expansion1[str(inst_num)]
457
- combined_text1 = combine(query_text_og, alt_text, run1_uses_query_expansion)
458
- col_run1.markdown(combined_text1)
459
- col_run2.markdown(query_text_og)
460
- query_text1 = combined_text1
461
- query_text2 = query_text_og
462
- elif run2_uses_query_expansion != "None":
463
- alt_text = query_expansion2[str(inst_num)]
464
- combined_text2 = combine(query_text_og, alt_text, run2_uses_query_expansion)
465
- col_run1.markdown(query_text_og)
466
- col_run2.markdown(combined_text2)
467
- query_text1 = query_text_og
468
- query_text2 = combined_text2
469
- else:
470
- query_text = query_text_og
471
- col_run1.markdown(query_text)
472
- col_run2.markdown(query_text)
473
- query_text1 = query_text
474
- query_text2 = query_text
475
-
476
- st.divider()
477
-
478
-
479
-
480
- ## Documents
481
- # relevant
482
- st.subheader("Relevant Documents")
483
- container_two_docs_rel = st.container()
484
- col_run1, col_run2 = container_two_docs_rel.columns(2, gap="medium")
485
- relevant_docs = list(qrels[str(inst_num)].keys())[:n_relevant_docs]
486
- relevant_score = {ind_doc_id: qrels[str(inst_num)][ind_doc_id] for ind_doc_id in relevant_docs}
487
- doc_texts = [(doc_id, corpus[doc_id]["title"] if "title" in corpus[doc_id] else "", corpus[doc_id]["text"], relevant_score[doc_id]) for doc_id in relevant_docs]
488
-
489
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None":
490
- show_orig_rel1 = col_run1.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig_run1", value=False)
491
- if doc_expansion2 is not None and run2_uses_doc_expansion != "None":
492
- show_orig_rel2 = col_run2.checkbox("Show Original Relevant Doc(s)", key=f"{inst_index}relorig_run2", value=False)
493
-
494
- for (docid, title, text, rel_score) in doc_texts:
495
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None" and not show_orig_rel1:
496
- alt_text = doc_expansion1[docid]["text"]
497
- text = combine(text, alt_text, run1_uses_doc_expansion)
498
-
499
- if use_model_saliency:
500
- if col_run1.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{docid}relevant", value=False):
501
- col_run1.markdown(get_saliency(query_text1, text),unsafe_allow_html=True)
502
- else:
503
- col_run1.text_area(f"{docid} (Rel: {rel_score}):", text, key=f"{inst_num}doc{docid}1")
504
- else:
505
- col_run1.text_area(f"{docid} (Rel: {rel_score}):", text, key=f"{inst_num}doc{docid}1")
506
-
507
- for (docid, title, text, rel_score) in doc_texts:
508
- if doc_expansion2 is not None and run2_uses_doc_expansion != "None" and not show_orig_rel2:
509
- alt_text = doc_expansion2[docid]["text"] if docid in doc_expansion2 else "<NOT EXPANDED>"
510
- text = combine(text, alt_text, run2_uses_doc_expansion)
511
-
512
- if use_model_saliency:
513
- if col_run2.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{docid}relevant2", value=False):
514
- col_run2.markdown(get_saliency(query_text2, text),unsafe_allow_html=True)
515
- else:
516
- col_run2.text_area(f"{docid}: (Rel: {rel_score})", text, key=f"{inst_num}doc{docid}2")
517
- else:
518
- col_run2.text_area(f"{docid}: (Rel: {rel_score})", text, key=f"{inst_num}doc{docid}2")
519
-
520
- # top ranked
521
- # NOTE: BEIR calls trec_eval which ranks by score, then doc_id for ties
522
- # we have to fix that or we don't match the scores
523
-
524
- ranks2 = []
525
- for docid in relevant_docs:
526
- pred_doc = run2_pandas[run2_pandas.doc_id.isin([docid])]
527
- rank_pred = pred_doc[pred_doc.qid == str(inst_num)]
528
- if rank_pred.empty:
529
- ranks2.append("-")
530
- else:
531
- ranks2.append(rank_pred.iloc[0]["rank"])
532
- # st.subheader("Ranked of Documents")
533
- # st.markdown(f"Rank: {rank_pred}")
534
- ranking_str2 = ",".join([str(item) for item in ranks2])
535
- if ranking_str2 == "":
536
- ranking_str2 = "-"
537
- rank_col2.metric("Run 2 " + f"Rank of Relevant Doc(s)", ranking_str2)
538
-
539
-
540
- ranks1 = []
541
- for docid in relevant_docs:
542
- pred_doc = run1_pandas[run1_pandas.doc_id.isin([docid])]
543
- rank_pred = pred_doc[pred_doc.qid == str(inst_num)]
544
- if rank_pred.empty:
545
- ranks1.append("-")
546
- else:
547
- ranks1.append(rank_pred.iloc[0]["rank"])
548
- # st.subheader("Ranked of Documents")
549
- # st.markdown(f"Rank: {rank_pred}")
550
- ranking_str1 = ",".join([str(item) for item in ranks1])
551
- if ranking_str1 == "":
552
- ranking_str1 = "-"
553
- rank_col1.metric("Run 1 " + f"Rank of Relevant Doc(s)", ranking_str1)
554
-
555
-
556
- st.divider()
557
-
558
-
559
- container_two_docs_ranked = st.container()
560
- col_run1, col_run2 = container_two_docs_ranked.columns(2, gap="medium")
561
-
562
- if col_run1.checkbox('Show top ranked documents for Run 1', key=f"{inst_index}top-1run"):
563
- col_run1.subheader("Top N Ranked Documents")
564
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None":
565
- show_orig_rel_ranked1 = col_run1.checkbox("Show Original Ranked Doc(s)", key=f"{inst_index}relorigdocs1", value=False)
566
-
567
- run1_top_n = run1_pandas[run1_pandas.qid == str(inst_num)].sort_values(["score", "doc_id"], ascending=[False, False])[:top_n]
568
- run1_top_n_docs = [corpus[str(doc_id)] for doc_id in run1_top_n.doc_id.tolist()]
569
-
570
- if doc_expansion1 is not None and run1_uses_doc_expansion != "None" and not show_orig_rel_ranked1:
571
- run1_top_n_docs_alt = [doc_expansion1[str(doc_id)] for doc_id in run1_top_n.doc_id.tolist()]
572
- for d_idx, doc in enumerate(run1_top_n_docs):
573
- alt_text = run1_top_n_docs_alt[d_idx]["text"]
574
- doc_text = combine(doc["text"], alt_text, run1_uses_doc_expansion)
575
- if use_model_saliency:
576
- if col_run1.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{d_idx}ranked1", value=False):
577
- col_run1.markdown(get_saliency(query_text1, doc_text),unsafe_allow_html=True)
578
- else:
579
- col_run1.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc_text, key=f"{inst_num}doc{d_idx}1")
580
- else:
581
- col_run1.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc_text, key=f"{inst_num}doc{d_idx}1")
582
- else:
583
- for d_idx, doc in enumerate(run1_top_n_docs):
584
- if use_model_saliency:
585
- if col_run1.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{d_idx}ranked1", value=False):
586
- col_run1.markdown(get_saliency(query_text1, doc),unsafe_allow_html=True)
587
- else:
588
- col_run1.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc["text"], key=f"{inst_num}doc{d_idx}1")
589
- else:
590
- col_run1.text_area(f"{run1_top_n['doc_id'].iloc[d_idx]}: ", doc["text"], key=f"{inst_num}doc{d_idx}1")
591
-
592
-
593
- if col_run2.checkbox('Show top ranked documents for Run 2', key=f"{inst_index}top-2run"):
594
- col_run2.subheader("Top N Ranked Documents")
595
- if doc_expansion2 is not None and run2_uses_doc_expansion != "None":
596
- show_orig_rel_ranked2 = col_run2.checkbox("Show Original Ranked Doc(s)", key=f"{inst_index}relorigdocs2", value=False)
597
- run2_top_n = run2_pandas[run2_pandas.qid == str(inst_num)].sort_values(["score", "doc_id"], ascending=[False, False])[:top_n]
598
- run2_top_n_docs = [corpus[str(doc_id)] for doc_id in run2_top_n.doc_id.tolist()]
599
-
600
-
601
- if doc_expansion2 is not None and run2_uses_doc_expansion != "None" and not show_orig_rel_ranked2:
602
- run2_top_n_docs_alt = [doc_expansion2[str(doc_id)] for doc_id in run2_top_n.doc_id.tolist()]
603
- for d_idx, doc in enumerate(run2_top_n_docs):
604
- alt_text = run2_top_n_docs_alt[d_idx]["text"]
605
- doc_text = combine(doc["text"], alt_text, run2_uses_doc_expansion)
606
- if use_model_saliency:
607
- if col_run2.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{d_idx}ranked2", value=False):
608
- col_run2.markdown(get_saliency(query_text2, doc_text),unsafe_allow_html=True)
609
- else:
610
- col_run2.text_area(f"{run2_top_n['doc_id'].iloc[d_idx]}: ", doc_text, key=f"{inst_num}doc{d_idx}2")
611
- else:
612
- col_run2.text_area(f"{run2_top_n['doc_id'].iloc[d_idx]}: ", doc_text, key=f"{inst_num}doc{d_idx}2")
613
- else:
614
- for d_idx, doc in enumerate(run2_top_n_docs):
615
- if use_model_saliency:
616
- if col_run2.checkbox("Show Model Saliency", key=f"{inst_index}model_saliency{d_idx}ranked2", value=False):
617
- col_run2.markdown(get_saliency(query_text2, doc),unsafe_allow_html=True)
618
- else:
619
- col_run2.text_area(f"{run2_top_n['doc_id'].iloc[d_idx]}: ", doc["text"], key=f"{inst_num}doc{d_idx}2")
620
- else:
621
- col_run2.text_area(f"{run2_top_n['doc_id'].iloc[d_idx]}: ", doc["text"], key=f"{inst_num}doc{d_idx}2")
622
-
623
- st.divider()
624
 
625
 
626
- else:
627
- st.title("Overview")
628
 
629
- st.subheader(f"Scores of {metric_name}")
630
- fig = create_boxplot_2df(results1, results2, metric_name)
631
- st.plotly_chart(fig)
632
 
633
- st.subheader(f"Score Difference of {metric_name}")
634
- fig_comp = create_boxplot_diff(results1, results2, metric_name)
635
- st.plotly_chart(fig_comp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
636
 
637
  else:
638
  st.warning("Please choose a dataset and upload a run file. If you chose \"custom\" be sure that you uploaded all files (queries, corpus, qrels)")
 
1
  import streamlit as st
2
  import os
3
  import pathlib
 
 
 
 
4
  import pandas as pd
5
  from collections import defaultdict
6
  import json
7
  import copy
8
  import plotly.express as px
9
 
10
+ from dataset_loading import load_local_qrels, load_local_corpus, load_local_queries
 
 
11
 
12
 
13
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
14
  st.set_page_config(layout="wide")
15
 
16
+ current_checkboxes = []
17
+ query_input = None
18
 
19
+ @st.cache_data
20
+ def convert_df(df):
21
+ # IMPORTANT: Cache the conversion to prevent computation on every rerun
22
+ return df.to_csv(path_or_buf=None, index=False, quotechar='"').encode('utf-8')
23
+
24
+
25
+ def create_histogram_relevant_docs(relevant_df):
26
+ # turn results into a dataframe and then plot
27
+ fig = px.histogram(relevant_df, x="relevant_docs")
28
+ # make it fit in one column
29
+ fig.update_layout(
30
+ height=400,
31
+ width=250
32
+ )
33
+ return fig
34
 
35
 
36
+ def get_current_data():
37
+ cur_query_data = []
38
+ cur_query = query_input.replace("\n", "\\n")
39
+ for doc_id, checkbox in current_checkboxes:
40
+ if checkbox:
41
+ cur_query_data.append({
42
+ "new_narrative": cur_query,
43
+ "qid": st.session_state.selectbox_instance,
44
+ "doc_id": doc_id,
45
+ "is_relevant": 0
46
+ })
47
 
48
+ # return the data as a CSV pandas
49
+ return convert_df(pd.DataFrame(cur_query_data))
50
 
51
+
52
+
53
+
54
+ if 'cur_instance_num' not in st.session_state:
55
+ st.session_state.cur_instance_num = -1
 
 
56
 
57
 
58
  def validate(config_option, file_loaded):
 
61
  st.stop()
62
 
63
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  with st.sidebar:
65
  st.title("Options")
66
+ st.header("Upload corpus")
67
+ corpus_file = st.file_uploader("Choose a file", key="corpus")
68
+ corpus = load_local_corpus(corpus_file)
69
+ st.header("Upload queries")
70
+ queries_file = st.file_uploader("Choose a file", key="queries")
71
+ queries = load_local_queries(queries_file)
72
+ st.header("Upload qrels")
73
+ qrels_file = st.file_uploader("Choose a file", key="qrels")
74
+ qrels = load_local_qrels(qrels_file)
75
+
76
+ ## make sure all qids in qrels are in queries and write out a warning if not
77
+ if queries is not None and qrels is not None:
78
+ missing_qids = set(qrels.keys()) - set(queries.keys()) | set(queries.keys()) - set(qrels.keys())
79
+ if len(missing_qids) > 0:
80
+ st.warning(f"The following qids in qrels are not in queries and will be deleted: {missing_qids}")
81
+ # remove them from qrels and queries
82
+ for qid in missing_qids:
83
+ if qid in qrels:
84
+ del qrels[qid]
85
+ if qid in queries:
86
+ del queries[qid]
87
+
88
+ data = []
89
+ for key, value in qrels.items():
90
+ data.append({"relevant_docs": len(value)})
91
+ relevant_df = pd.DataFrame(data)
 
 
 
 
 
 
 
 
92
 
93
  z = st.header("Analysis Options")
94
  # sliderbar of how many Top N to choose
95
+ n_relevant_docs = st.slider("Number of relevant docs", 1, 999, 20)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
 
 
 
 
 
 
 
97
 
98
+ col1, col2 = st.columns([1, 3], gap="large")
99
 
100
+ if corpus is not None and queries is not None and qrels is not None:
101
+ with st.sidebar:
102
+ st.success("All files uploaded")
103
 
 
 
 
 
 
 
104
  with col1:
105
+ # breakpoint()
106
+ set_of_cols = set(qrels.keys())
107
+ container_for_nav = st.container()
108
+ name_of_columns = sorted([item for item in set_of_cols])
109
+ instances_to_use = name_of_columns
110
  st.title("Instances")
111
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  def sync_from_drop():
113
  if st.session_state.selectbox_instance == "Overview":
114
  st.session_state.number_of_col = -1
 
131
  number_of_col = container_for_nav.number_input(min_value=-1, step=1, max_value=len(instances_to_use) - 1, on_change=sync_from_number, label=f"Select instance by index (up to **{len(instances_to_use) - 1}**)", key="number_of_col")
132
  selectbox_instance = container_for_nav.selectbox("Select instance by ID", ["Overview"] + name_of_columns, on_change=sync_from_drop, key="selectbox_instance")
133
  st.divider()
134
+ # make pie plot showing how many relevant docs there are per query histogram
135
+ st.header("Relevant Docs Per Query")
136
+ plotly_chart = create_histogram_relevant_docs(relevant_df)
137
+ st.plotly_chart(plotly_chart)
138
+ st.divider()
139
+ # now show the number with relevant docs less than `n_relevant_docs`
140
+ st.header("Relevant Docs Less Than {}:".format(n_relevant_docs))
141
+ st.subheader(f'{relevant_df[relevant_df["relevant_docs"] < n_relevant_docs].shape[0]} Queries')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
 
144
  with col2:
145
+ # get instance number
146
+ inst_index = number_of_col
147
+
148
+ if inst_index >= 0:
149
+ inst_num = instances_to_use[inst_index]
150
+
151
+ st.markdown("<h1 style='text-align: center; color: black;text-decoration: underline;'>Editor</h1>", unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
 
153
 
154
+ container = st.container()
155
+
156
 
157
+ container.divider()
 
 
158
 
159
+ container.subheader(f"Query")
160
+
161
+ query_text = queries[str(inst_num)].strip()
162
+ query_input = container.text_area(f"QID: {inst_num}", query_text)
163
+ container.divider()
164
+
165
+ ## Documents
166
+ # relevant
167
+ relevant_docs = list(qrels[str(inst_num)].keys())[:n_relevant_docs]
168
+ doc_texts = [(doc_id, corpus[doc_id]["title"] if "title" in corpus[doc_id] else "", corpus[doc_id]["text"]) for doc_id in relevant_docs]
169
+ container.subheader(f"Relevant Documents ({len(list(qrels[str(inst_num)].keys()))})")
170
+ current_checkboxes = []
171
+ for (docid, title, text) in doc_texts:
172
+ current_checkboxes.append((docid, container.checkbox(f'{docid} is Non-Relevant', key=docid)))
173
+ container.text_area(f"{docid}:", text)
174
+
175
+
176
+ container.divider()
177
+ if st.checkbox("Download data as CSV"):
178
+ st.download_button(
179
+ label="Download data as CSV",
180
+ data=get_current_data(),
181
+ file_name=f'annotation_query_{inst_num}.csv',
182
+ mime='text/csv',
183
+ )
184
+
185
+ # none checked
186
+ elif inst_index < 0:
187
+ st.title("Overview")
188
+
189
+
190
 
191
  else:
192
  st.warning("Please choose a dataset and upload a run file. If you chose \"custom\" be sure that you uploaded all files (queries, corpus, qrels)")
constants.py DELETED
@@ -1,90 +0,0 @@
1
- from ir_dataset_metadata import IR_DATASETS
2
-
3
-
4
- ALL_METRICS = [
5
- "ndcg_cut_10",
6
- "ndcg_cut_5",
7
- "ndcg_cut_15",
8
- "ndcg_cut_20",
9
- "ndcg_cut_30",
10
- "ndcg_cut_100",
11
- "ndcg_cut_200",
12
- "ndcg_cut_500",
13
- "ndcg_cut_1000",
14
- "map",
15
- "P_5",
16
- "P_10",
17
- "P_15",
18
- "P_20",
19
- "P_30",
20
- "P_100",
21
- "P_200",
22
- "P_500",
23
- "P_1000",
24
- "recall_5",
25
- "recall_10",
26
- "recall_15",
27
- "recall_20",
28
- "recall_30",
29
- "recall_100",
30
- "recall_200",
31
- "recall_500",
32
- "recall_1000",
33
- "recip_rank",
34
- "set_recall",
35
- "set_P",
36
- "set_F",
37
- "num_rel_ret",
38
- "num_ret",
39
- "num_rel",
40
- "num_q",
41
- "num_rel",
42
- "num_rel_ret"
43
- "Rprec",
44
- "bpref",
45
- "iprec_at_recall_0.00",
46
- "iprec_at_recall_0.10",
47
- "iprec_at_recall_0.20",
48
- "iprec_at_recall_0.30",
49
- "iprec_at_recall_0.40",
50
- "iprec_at_recall_0.50",
51
- "iprec_at_recall_0.60",
52
- "iprec_at_recall_0.70",
53
- "iprec_at_recall_0.80",
54
- "iprec_at_recall_0.90",
55
- "iprec_at_recall_1.00",
56
- ]
57
-
58
-
59
- BEIR = [
60
- "msmarco",
61
- "trec-covid",
62
- "nf_corpus",
63
- "bioasq",
64
- "nq",
65
- "hotpotqa",
66
- "fiqa",
67
- "signal1m",
68
- "trec-news",
69
- "robust04",
70
- "arguana",
71
- "webis-touche2020",
72
- "cqadupstack",
73
- "quora",
74
- "dbpedia-entity",
75
- "scidocs",
76
- "fever",
77
- "climate-fever",
78
- "scifact",
79
- ]
80
-
81
-
82
- LOCAL_DATASETS = [
83
- "gooaq_technical",
84
- "codesearch_py",
85
- ]
86
-
87
-
88
-
89
-
90
- ALL_DATASETS = ["", "custom"] + LOCAL_DATASETS + BEIR + IR_DATASETS
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dataset_loading.py CHANGED
@@ -1,19 +1,13 @@
1
  import streamlit as st
2
  import os
3
  import pathlib
4
- import beir
5
- from beir import util
6
- from beir.datasets.data_loader import GenericDataLoader
7
- import pytrec_eval
8
  import pandas as pd
9
  from collections import defaultdict
10
  import json
11
  import copy
12
- import ir_datasets
13
 
14
 
15
- from constants import BEIR, IR_DATASETS, LOCAL_DATASETS
16
-
17
 
18
  @st.cache_data
19
  def load_local_corpus(corpus_file, columns_to_combine=["title", "text"]):
@@ -90,28 +84,6 @@ def load_local_qrels(qrels_file):
90
  return qid2did2label
91
 
92
 
93
- @st.cache_data
94
- def load_run(f_run):
95
- run = pytrec_eval.parse_run(copy.deepcopy(f_run))
96
- # convert bytes to strings for keys
97
- new_run = defaultdict(dict)
98
- for key, sub_dict in run.items():
99
- new_run[key.decode("utf-8")] = {k.decode("utf-8"): v for k, v in sub_dict.items()}
100
-
101
- run_pandas = pd.read_csv(f_run, header=None, index_col=None, sep="\t")
102
- run_pandas.columns = ["qid", "generic", "doc_id", "rank", "score", "model"]
103
- run_pandas.doc_id = run_pandas.doc_id.astype(str)
104
- run_pandas.qid = run_pandas.qid.astype(str)
105
- run_pandas["rank"] = run_pandas["rank"].astype(int)
106
- run_pandas.score = run_pandas.score.astype(float)
107
- all_groups = []
108
- for qid, sub_df in run_pandas.groupby("qid"):
109
- sub_df.sort_values(["score", "doc_id"], ascending=[False, False])
110
- sub_df["rank"] = list(range(1, len(sub_df) + 1))
111
- all_groups.append(sub_df)
112
- run_pandas = pd.concat(all_groups)
113
- return new_run, run_pandas
114
-
115
 
116
  @st.cache_data
117
  def load_jsonl(f):
@@ -137,46 +109,6 @@ def load_jsonl(f):
137
  return did2text, sub_did2text
138
 
139
 
140
- @st.cache_data(persist="disk")
141
- def get_beir(dataset: str):
142
- url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
143
- out_dir = os.path.join(pathlib.Path(__file__).parent.absolute(), "datasets")
144
- data_path = util.download_and_unzip(url, out_dir)
145
- return GenericDataLoader(data_folder=data_path).load(split="test")
146
-
147
-
148
- @st.cache_data(persist="disk")
149
- def get_ir_datasets(dataset_name: str, input_fields_doc: str = None, input_fields_query: str = None):
150
- dataset = ir_datasets.load(dataset_name)
151
- queries = {}
152
- for qid, query in dataset.queries_iter():
153
- if input_fields_query is None:
154
- if type(query) == str:
155
- queries[qid] = query
156
- else:
157
- # get all fields that exist in query
158
- all_fields = {field: getattr(query, field) for field in query._fields}
159
- # put all fields into a single string
160
- queries[qid] = " ".join([str(v) for v in all_fields.values()])
161
- else:
162
- all_fields = {field: getattr(query, field) for field in input_fields_query}
163
- queries[qid] = " ".join([str(v) for v in all_fields.values()])
164
-
165
- corpus = {}
166
- for doc in dataset.docs_iter():
167
- if input_fields_doc is None:
168
- if type(doc) == str:
169
- corpus[doc.doc_id] = {"text": doc}
170
- else: # get all fields that exist in query
171
- all_fields = {field: getattr(doc, field) for field in doc._fields}
172
- corpus[doc.doc_id] = {"text": " ".join([str(v) for v in all_fields.values()])}
173
- else:
174
- all_fields = {field: getattr(doc, field) for field in input_fields_doc}
175
- corpus[doc.doc_id] = {"text": " ".join([str(v) for v in all_fields.values()])}
176
-
177
- # return corpus, queries, qrels
178
- return corpus, queries, dataset.qrels_dict()
179
-
180
 
181
  @st.cache_data(persist="disk")
182
  def get_dataset(dataset_name: str, input_fields_doc, input_fields_query):
@@ -188,15 +120,5 @@ def get_dataset(dataset_name: str, input_fields_doc, input_fields_query):
188
  if dataset_name == "":
189
  return {}, {}, {}
190
 
191
- if dataset_name in BEIR:
192
- return get_beir(dataset_name)
193
- elif dataset_name in IR_DATASETS:
194
- return get_ir_datasets(dataset_name, input_fields_doc, input_fields_query)
195
- elif dataset_name in LOCAL_DATASETS:
196
- base_path = f"local_datasets/{dataset_name}"
197
- corpus_file = open(f"{base_path}/corpus.jsonl", "r")
198
- queries_file = open(f"{base_path}/queries.jsonl", "r")
199
- qrels_file = open(f"{base_path}/qrels/test.tsv", "r")
200
- return load_local_corpus(corpus_file), load_local_queries(queries_file), load_local_qrels(qrels_file)
201
  else:
202
  raise NotImplementedError("Dataset not implemented")
 
1
  import streamlit as st
2
  import os
3
  import pathlib
 
 
 
 
4
  import pandas as pd
5
  from collections import defaultdict
6
  import json
7
  import copy
8
+ import plotly.express as px
9
 
10
 
 
 
11
 
12
  @st.cache_data
13
  def load_local_corpus(corpus_file, columns_to_combine=["title", "text"]):
 
84
  return qid2did2label
85
 
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
  @st.cache_data
89
  def load_jsonl(f):
 
109
  return did2text, sub_did2text
110
 
111
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
  @st.cache_data(persist="disk")
114
  def get_dataset(dataset_name: str, input_fields_doc, input_fields_query):
 
120
  if dataset_name == "":
121
  return {}, {}, {}
122
 
 
 
 
 
 
 
 
 
 
 
123
  else:
124
  raise NotImplementedError("Dataset not implemented")
ir_dataset_metadata.py DELETED
@@ -1,486 +0,0 @@
1
-
2
- IR_DATASETS = [
3
- "antique/test",
4
- "antique/test/non-offensive",
5
- "antique/train",
6
- "antique/train/split200-train",
7
- "antique/train/split200-valid",
8
- "aol-ia",
9
- "aquaint/trec-robust-2005",
10
- "argsme/1.0/touche-2020-task-1/uncorrected",
11
- "argsme/2020-04-01/processed/touche-2022-task-1",
12
- "argsme/2020-04-01/touche-2020-task-1",
13
- "argsme/2020-04-01/touche-2020-task-1/uncorrected",
14
- "argsme/2020-04-01/touche-2021-task-1",
15
- "beir/arguana",
16
- "beir/climate-fever",
17
- "beir/cqadupstack/android",
18
- "beir/cqadupstack/english",
19
- "beir/cqadupstack/gaming",
20
- "beir/cqadupstack/gis",
21
- "beir/cqadupstack/mathematica",
22
- "beir/cqadupstack/physics",
23
- "beir/cqadupstack/programmers",
24
- "beir/cqadupstack/stats",
25
- "beir/cqadupstack/tex",
26
- "beir/cqadupstack/unix",
27
- "beir/cqadupstack/webmasters",
28
- "beir/cqadupstack/wordpress",
29
- "beir/dbpedia-entity/dev",
30
- "beir/dbpedia-entity/test",
31
- "beir/fever/dev",
32
- "beir/fever/test",
33
- "beir/fever/train",
34
- "beir/fiqa/dev",
35
- "beir/fiqa/test",
36
- "beir/fiqa/train",
37
- "beir/hotpotqa/dev",
38
- "beir/hotpotqa/test",
39
- "beir/hotpotqa/train",
40
- "beir/msmarco/dev",
41
- "beir/msmarco/test",
42
- "beir/msmarco/train",
43
- "beir/nfcorpus/dev",
44
- "beir/nfcorpus/test",
45
- "beir/nfcorpus/train",
46
- "beir/nq",
47
- "beir/quora/dev",
48
- "beir/quora/test",
49
- "beir/scidocs",
50
- "beir/scifact/test",
51
- "beir/scifact/train",
52
- "beir/trec-covid",
53
- "beir/webis-touche2020",
54
- "beir/webis-touche2020/v2",
55
- "car/v1.5/test200",
56
- "car/v1.5/train/fold0",
57
- "car/v1.5/train/fold1",
58
- "car/v1.5/train/fold2",
59
- "car/v1.5/train/fold3",
60
- "car/v1.5/train/fold4",
61
- "car/v1.5/trec-y1/auto",
62
- "car/v1.5/trec-y1/manual",
63
- "clinicaltrials/2017/trec-pm-2017",
64
- "clinicaltrials/2017/trec-pm-2018",
65
- "clinicaltrials/2019/trec-pm-2019",
66
- "clinicaltrials/2021/trec-ct-2021",
67
- "clueweb09/catb/trec-web-2009",
68
- "clueweb09/catb/trec-web-2009/diversity",
69
- "clueweb09/catb/trec-web-2010",
70
- "clueweb09/catb/trec-web-2010/diversity",
71
- "clueweb09/catb/trec-web-2011",
72
- "clueweb09/catb/trec-web-2011/diversity",
73
- "clueweb09/catb/trec-web-2012",
74
- "clueweb09/catb/trec-web-2012/diversity",
75
- "clueweb09/en/trec-web-2009",
76
- "clueweb09/en/trec-web-2009/diversity",
77
- "clueweb09/en/trec-web-2010",
78
- "clueweb09/en/trec-web-2010/diversity",
79
- "clueweb09/en/trec-web-2011",
80
- "clueweb09/en/trec-web-2011/diversity",
81
- "clueweb09/en/trec-web-2012",
82
- "clueweb09/en/trec-web-2012/diversity",
83
- "clueweb09/trec-mq-2009",
84
- "clueweb12/b13/clef-ehealth",
85
- "clueweb12/b13/clef-ehealth/cs",
86
- "clueweb12/b13/clef-ehealth/de",
87
- "clueweb12/b13/clef-ehealth/fr",
88
- "clueweb12/b13/clef-ehealth/hu",
89
- "clueweb12/b13/clef-ehealth/pl",
90
- "clueweb12/b13/clef-ehealth/sv",
91
- "clueweb12/b13/ntcir-www-1",
92
- "clueweb12/b13/ntcir-www-2",
93
- "clueweb12/b13/trec-misinfo-2019",
94
- "clueweb12/touche-2020-task-2",
95
- "clueweb12/touche-2021-task-2",
96
- "clueweb12/touche-2022-task-2",
97
- "clueweb12/touche-2022-task-2/expanded-doc-t5-query",
98
- "clueweb12/trec-web-2013",
99
- "clueweb12/trec-web-2013/diversity",
100
- "clueweb12/trec-web-2014",
101
- "clueweb12/trec-web-2014/diversity",
102
- "codec",
103
- "codec/economics",
104
- "codec/history",
105
- "codec/politics",
106
- "codesearchnet/challenge",
107
- "codesearchnet/test",
108
- "codesearchnet/train",
109
- "codesearchnet/valid",
110
- "cord19/fulltext/trec-covid",
111
- "cord19/trec-covid",
112
- "cord19/trec-covid/round1",
113
- "cord19/trec-covid/round2",
114
- "cord19/trec-covid/round3",
115
- "cord19/trec-covid/round4",
116
- "cord19/trec-covid/round5",
117
- "cranfield",
118
- "disks45/nocr/trec-robust-2004",
119
- "disks45/nocr/trec-robust-2004/fold1",
120
- "disks45/nocr/trec-robust-2004/fold2",
121
- "disks45/nocr/trec-robust-2004/fold3",
122
- "disks45/nocr/trec-robust-2004/fold4",
123
- "disks45/nocr/trec-robust-2004/fold5",
124
- "disks45/nocr/trec7",
125
- "disks45/nocr/trec8",
126
- "dpr-w100/natural-questions/dev",
127
- "dpr-w100/natural-questions/train",
128
- "dpr-w100/trivia-qa/dev",
129
- "dpr-w100/trivia-qa/train",
130
- "gov/trec-web-2002",
131
- "gov/trec-web-2002/named-page",
132
- "gov/trec-web-2003",
133
- "gov/trec-web-2003/named-page",
134
- "gov/trec-web-2004",
135
- "gov2/trec-mq-2007",
136
- "gov2/trec-mq-2008",
137
- "gov2/trec-tb-2004",
138
- "gov2/trec-tb-2005",
139
- "gov2/trec-tb-2005/efficiency",
140
- "gov2/trec-tb-2005/named-page",
141
- "gov2/trec-tb-2006",
142
- "gov2/trec-tb-2006/efficiency",
143
- "gov2/trec-tb-2006/efficiency/stream3",
144
- "gov2/trec-tb-2006/named-page",
145
- "hc4/fa/dev",
146
- "hc4/fa/test",
147
- "hc4/fa/train",
148
- "hc4/ru/dev",
149
- "hc4/ru/test",
150
- "hc4/ru/train",
151
- "hc4/zh/dev",
152
- "hc4/zh/test",
153
- "hc4/zh/train",
154
- "highwire/trec-genomics-2006",
155
- "highwire/trec-genomics-2007",
156
- "istella22/test",
157
- "istella22/test/fold1",
158
- "istella22/test/fold2",
159
- "istella22/test/fold3",
160
- "istella22/test/fold4",
161
- "istella22/test/fold5",
162
- "kilt/codec",
163
- "kilt/codec/economics",
164
- "kilt/codec/history",
165
- "kilt/codec/politics",
166
- "lotte/lifestyle/dev/forum",
167
- "lotte/lifestyle/dev/search",
168
- "lotte/lifestyle/test/forum",
169
- "lotte/lifestyle/test/search",
170
- "lotte/pooled/dev/forum",
171
- "lotte/pooled/dev/search",
172
- "lotte/pooled/test/forum",
173
- "lotte/pooled/test/search",
174
- "lotte/recreation/dev/forum",
175
- "lotte/recreation/dev/search",
176
- "lotte/recreation/test/forum",
177
- "lotte/recreation/test/search",
178
- "lotte/science/dev/forum",
179
- "lotte/science/dev/search",
180
- "lotte/science/test/forum",
181
- "lotte/science/test/search",
182
- "lotte/technology/dev/forum",
183
- "lotte/technology/dev/search",
184
- "lotte/technology/test/forum",
185
- "lotte/technology/test/search",
186
- "lotte/writing/dev/forum",
187
- "lotte/writing/dev/search",
188
- "lotte/writing/test/forum",
189
- "lotte/writing/test/search",
190
- "medline/2004/trec-genomics-2004",
191
- "medline/2004/trec-genomics-2005",
192
- "medline/2017/trec-pm-2017",
193
- "medline/2017/trec-pm-2018",
194
- "mmarco/de/dev",
195
- "mmarco/de/dev/small",
196
- "mmarco/de/train",
197
- "mmarco/es/dev",
198
- "mmarco/es/dev/small",
199
- "mmarco/es/train",
200
- "mmarco/fr/dev",
201
- "mmarco/fr/dev/small",
202
- "mmarco/fr/train",
203
- "mmarco/id/dev",
204
- "mmarco/id/dev/small",
205
- "mmarco/id/train",
206
- "mmarco/it/dev",
207
- "mmarco/it/dev/small",
208
- "mmarco/it/train",
209
- "mmarco/pt/dev",
210
- "mmarco/pt/dev/small",
211
- "mmarco/pt/dev/small/v1.1",
212
- "mmarco/pt/dev/v1.1",
213
- "mmarco/pt/train",
214
- "mmarco/pt/train/v1.1",
215
- "mmarco/ru/dev",
216
- "mmarco/ru/dev/small",
217
- "mmarco/ru/train",
218
- "mmarco/v2/ar/dev",
219
- "mmarco/v2/ar/dev/small",
220
- "mmarco/v2/ar/train",
221
- "mmarco/v2/de/dev",
222
- "mmarco/v2/de/dev/small",
223
- "mmarco/v2/de/train",
224
- "mmarco/v2/dt/dev",
225
- "mmarco/v2/dt/dev/small",
226
- "mmarco/v2/dt/train",
227
- "mmarco/v2/es/dev",
228
- "mmarco/v2/es/dev/small",
229
- "mmarco/v2/es/train",
230
- "mmarco/v2/fr/dev",
231
- "mmarco/v2/fr/dev/small",
232
- "mmarco/v2/fr/train",
233
- "mmarco/v2/hi/dev",
234
- "mmarco/v2/hi/dev/small",
235
- "mmarco/v2/hi/train",
236
- "mmarco/v2/id/dev",
237
- "mmarco/v2/id/dev/small",
238
- "mmarco/v2/id/train",
239
- "mmarco/v2/it/dev",
240
- "mmarco/v2/it/dev/small",
241
- "mmarco/v2/it/train",
242
- "mmarco/v2/ja/dev",
243
- "mmarco/v2/ja/dev/small",
244
- "mmarco/v2/ja/train",
245
- "mmarco/v2/pt/dev",
246
- "mmarco/v2/pt/dev/small",
247
- "mmarco/v2/pt/train",
248
- "mmarco/v2/ru/dev",
249
- "mmarco/v2/ru/dev/small",
250
- "mmarco/v2/ru/train",
251
- "mmarco/v2/vi/dev",
252
- "mmarco/v2/vi/dev/small",
253
- "mmarco/v2/vi/train",
254
- "mmarco/v2/zh/dev",
255
- "mmarco/v2/zh/dev/small",
256
- "mmarco/v2/zh/train",
257
- "mmarco/zh/dev",
258
- "mmarco/zh/dev/small",
259
- "mmarco/zh/dev/small/v1.1",
260
- "mmarco/zh/dev/v1.1",
261
- "mmarco/zh/train",
262
- "mr-tydi/ar",
263
- "mr-tydi/ar/dev",
264
- "mr-tydi/ar/test",
265
- "mr-tydi/ar/train",
266
- "mr-tydi/bn",
267
- "mr-tydi/bn/dev",
268
- "mr-tydi/bn/test",
269
- "mr-tydi/bn/train",
270
- "mr-tydi/en",
271
- "mr-tydi/en/dev",
272
- "mr-tydi/en/test",
273
- "mr-tydi/en/train",
274
- "mr-tydi/fi",
275
- "mr-tydi/fi/dev",
276
- "mr-tydi/fi/test",
277
- "mr-tydi/fi/train",
278
- "mr-tydi/id",
279
- "mr-tydi/id/dev",
280
- "mr-tydi/id/test",
281
- "mr-tydi/id/train",
282
- "mr-tydi/ja",
283
- "mr-tydi/ja/dev",
284
- "mr-tydi/ja/test",
285
- "mr-tydi/ja/train",
286
- "mr-tydi/ko",
287
- "mr-tydi/ko/dev",
288
- "mr-tydi/ko/test",
289
- "mr-tydi/ko/train",
290
- "mr-tydi/ru",
291
- "mr-tydi/ru/dev",
292
- "mr-tydi/ru/test",
293
- "mr-tydi/ru/train",
294
- "mr-tydi/sw",
295
- "mr-tydi/sw/dev",
296
- "mr-tydi/sw/test",
297
- "mr-tydi/sw/train",
298
- "mr-tydi/te",
299
- "mr-tydi/te/dev",
300
- "mr-tydi/te/test",
301
- "mr-tydi/te/train",
302
- "mr-tydi/th",
303
- "mr-tydi/th/dev",
304
- "mr-tydi/th/test",
305
- "mr-tydi/th/train",
306
- "msmarco-document-v2/dev1",
307
- "msmarco-document-v2/dev2",
308
- "msmarco-document-v2/train",
309
- "msmarco-document-v2/trec-dl-2019",
310
- "msmarco-document-v2/trec-dl-2019/judged",
311
- "msmarco-document-v2/trec-dl-2020",
312
- "msmarco-document-v2/trec-dl-2020/judged",
313
- "msmarco-document-v2/trec-dl-2021",
314
- "msmarco-document-v2/trec-dl-2021/judged",
315
- "msmarco-document-v2/trec-dl-2022",
316
- "msmarco-document-v2/trec-dl-2022/judged",
317
- "msmarco-document/dev",
318
- "msmarco-document/orcas",
319
- "msmarco-document/train",
320
- "msmarco-document/trec-dl-2019",
321
- "msmarco-document/trec-dl-2019/judged",
322
- "msmarco-document/trec-dl-2020",
323
- "msmarco-document/trec-dl-2020/judged",
324
- "msmarco-document/trec-dl-hard",
325
- "msmarco-document/trec-dl-hard/fold1",
326
- "msmarco-document/trec-dl-hard/fold2",
327
- "msmarco-document/trec-dl-hard/fold3",
328
- "msmarco-document/trec-dl-hard/fold4",
329
- "msmarco-document/trec-dl-hard/fold5",
330
- "msmarco-passage-v2/dev1",
331
- "msmarco-passage-v2/dev2",
332
- "msmarco-passage-v2/train",
333
- "msmarco-passage-v2/trec-dl-2021",
334
- "msmarco-passage-v2/trec-dl-2021/judged",
335
- "msmarco-passage-v2/trec-dl-2022",
336
- "msmarco-passage-v2/trec-dl-2022/judged",
337
- "msmarco-passage/dev",
338
- "msmarco-passage/dev/2",
339
- "msmarco-passage/dev/judged",
340
- "msmarco-passage/dev/small",
341
- "msmarco-passage/train",
342
- "msmarco-passage/train/judged",
343
- "msmarco-passage/train/medical",
344
- "msmarco-passage/train/split200-train",
345
- "msmarco-passage/train/split200-valid",
346
- "msmarco-passage/train/triples-small",
347
- "msmarco-passage/train/triples-v2",
348
- "msmarco-passage/trec-dl-2019",
349
- "msmarco-passage/trec-dl-2019/judged",
350
- "msmarco-passage/trec-dl-2020",
351
- "msmarco-passage/trec-dl-2020/judged",
352
- "msmarco-passage/trec-dl-hard",
353
- "msmarco-passage/trec-dl-hard/fold1",
354
- "msmarco-passage/trec-dl-hard/fold2",
355
- "msmarco-passage/trec-dl-hard/fold3",
356
- "msmarco-passage/trec-dl-hard/fold4",
357
- "msmarco-passage/trec-dl-hard/fold5",
358
- "msmarco-qna/dev",
359
- "msmarco-qna/train",
360
- "natural-questions/dev",
361
- "natural-questions/train",
362
- "neuclir/1/fa/hc4-filtered",
363
- "neuclir/1/ru/hc4-filtered",
364
- "neuclir/1/zh/hc4-filtered",
365
- "neumarco/fa/dev",
366
- "neumarco/fa/dev/judged",
367
- "neumarco/fa/dev/small",
368
- "neumarco/fa/train",
369
- "neumarco/fa/train/judged",
370
- "neumarco/ru/dev",
371
- "neumarco/ru/dev/judged",
372
- "neumarco/ru/dev/small",
373
- "neumarco/ru/train",
374
- "neumarco/ru/train/judged",
375
- "neumarco/zh/dev",
376
- "neumarco/zh/dev/judged",
377
- "neumarco/zh/dev/small",
378
- "neumarco/zh/train",
379
- "neumarco/zh/train/judged",
380
- "nfcorpus/dev",
381
- "nfcorpus/dev/nontopic",
382
- "nfcorpus/dev/video",
383
- "nfcorpus/test",
384
- "nfcorpus/test/nontopic",
385
- "nfcorpus/test/video",
386
- "nfcorpus/train",
387
- "nfcorpus/train/nontopic",
388
- "nfcorpus/train/video",
389
- "nyt/trec-core-2017",
390
- "nyt/wksup",
391
- "nyt/wksup/train",
392
- "nyt/wksup/valid",
393
- "pmc/v1/trec-cds-2014",
394
- "pmc/v1/trec-cds-2015",
395
- "pmc/v2/trec-cds-2016",
396
- "sara",
397
- "touche-image/2022-06-13/touche-2022-task-3",
398
- "trec-arabic/ar2001",
399
- "trec-arabic/ar2002",
400
- "trec-cast/v0/train",
401
- "trec-cast/v0/train/judged",
402
- "trec-cast/v1/2019",
403
- "trec-cast/v1/2019/judged",
404
- "trec-cast/v1/2020",
405
- "trec-cast/v1/2020/judged",
406
- "trec-fair-2021/eval",
407
- "trec-fair-2021/train",
408
- "trec-fair/2021/eval",
409
- "trec-fair/2021/train",
410
- "trec-fair/2022/train",
411
- "trec-mandarin/trec5",
412
- "trec-mandarin/trec6",
413
- "trec-robust04",
414
- "trec-robust04/fold1",
415
- "trec-robust04/fold2",
416
- "trec-robust04/fold3",
417
- "trec-robust04/fold4",
418
- "trec-robust04/fold5",
419
- "trec-spanish/trec3",
420
- "trec-spanish/trec4",
421
- "trec-tot/2023/dev",
422
- "trec-tot/2023/train",
423
- "tripclick/train",
424
- "tripclick/train/head",
425
- "tripclick/train/head/dctr",
426
- "tripclick/train/hofstaetter-triples",
427
- "tripclick/train/tail",
428
- "tripclick/train/torso",
429
- "tripclick/val",
430
- "tripclick/val/head",
431
- "tripclick/val/head/dctr",
432
- "tripclick/val/tail",
433
- "tripclick/val/torso",
434
- "tweets2013-ia/trec-mb-2013",
435
- "tweets2013-ia/trec-mb-2014",
436
- "vaswani",
437
- "wapo/v2/trec-core-2018",
438
- "wapo/v2/trec-news-2018",
439
- "wapo/v2/trec-news-2019",
440
- "wikiclir/ar",
441
- "wikiclir/ca",
442
- "wikiclir/cs",
443
- "wikiclir/de",
444
- "wikiclir/en-simple",
445
- "wikiclir/es",
446
- "wikiclir/fi",
447
- "wikiclir/fr",
448
- "wikiclir/it",
449
- "wikiclir/ja",
450
- "wikiclir/ko",
451
- "wikiclir/nl",
452
- "wikiclir/nn",
453
- "wikiclir/no",
454
- "wikiclir/pl",
455
- "wikiclir/pt",
456
- "wikiclir/ro",
457
- "wikiclir/ru",
458
- "wikiclir/sv",
459
- "wikiclir/sw",
460
- "wikiclir/tl",
461
- "wikiclir/tr",
462
- "wikiclir/uk",
463
- "wikiclir/vi",
464
- "wikiclir/zh",
465
- "wikir/en1k/test",
466
- "wikir/en1k/training",
467
- "wikir/en1k/validation",
468
- "wikir/en59k/test",
469
- "wikir/en59k/training",
470
- "wikir/en59k/validation",
471
- "wikir/en78k/test",
472
- "wikir/en78k/training",
473
- "wikir/en78k/validation",
474
- "wikir/ens78k/test",
475
- "wikir/ens78k/training",
476
- "wikir/ens78k/validation",
477
- "wikir/es13k/test",
478
- "wikir/es13k/training",
479
- "wikir/es13k/validation",
480
- "wikir/fr14k/test",
481
- "wikir/fr14k/training",
482
- "wikir/fr14k/validation",
483
- "wikir/it16k/test",
484
- "wikir/it16k/training",
485
- "wikir/it16k/validation"
486
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ir_dataset_names.json DELETED
@@ -1,485 +0,0 @@
- [
- "antique/test",
- "antique/test/non-offensive",
- "antique/train",
- "antique/train/split200-train",
- "antique/train/split200-valid",
- "aol-ia",
- "aquaint/trec-robust-2005",
- "argsme/1.0/touche-2020-task-1/uncorrected",
- "argsme/2020-04-01/processed/touche-2022-task-1",
- "argsme/2020-04-01/touche-2020-task-1",
- "argsme/2020-04-01/touche-2020-task-1/uncorrected",
- "argsme/2020-04-01/touche-2021-task-1",
- "beir/arguana",
- "beir/climate-fever",
- "beir/cqadupstack/android",
- "beir/cqadupstack/english",
- "beir/cqadupstack/gaming",
- "beir/cqadupstack/gis",
- "beir/cqadupstack/mathematica",
- "beir/cqadupstack/physics",
- "beir/cqadupstack/programmers",
- "beir/cqadupstack/stats",
- "beir/cqadupstack/tex",
- "beir/cqadupstack/unix",
- "beir/cqadupstack/webmasters",
- "beir/cqadupstack/wordpress",
- "beir/dbpedia-entity/dev",
- "beir/dbpedia-entity/test",
- "beir/fever/dev",
- "beir/fever/test",
- "beir/fever/train",
- "beir/fiqa/dev",
- "beir/fiqa/test",
- "beir/fiqa/train",
- "beir/hotpotqa/dev",
- "beir/hotpotqa/test",
- "beir/hotpotqa/train",
- "beir/msmarco/dev",
- "beir/msmarco/test",
- "beir/msmarco/train",
- "beir/nfcorpus/dev",
- "beir/nfcorpus/test",
- "beir/nfcorpus/train",
- "beir/nq",
- "beir/quora/dev",
- "beir/quora/test",
- "beir/scidocs",
- "beir/scifact/test",
- "beir/scifact/train",
- "beir/trec-covid",
- "beir/webis-touche2020",
- "beir/webis-touche2020/v2",
- "car/v1.5/test200",
- "car/v1.5/train/fold0",
- "car/v1.5/train/fold1",
- "car/v1.5/train/fold2",
- "car/v1.5/train/fold3",
- "car/v1.5/train/fold4",
- "car/v1.5/trec-y1/auto",
- "car/v1.5/trec-y1/manual",
- "clinicaltrials/2017/trec-pm-2017",
- "clinicaltrials/2017/trec-pm-2018",
- "clinicaltrials/2019/trec-pm-2019",
- "clinicaltrials/2021/trec-ct-2021",
- "clueweb09/catb/trec-web-2009",
- "clueweb09/catb/trec-web-2009/diversity",
- "clueweb09/catb/trec-web-2010",
- "clueweb09/catb/trec-web-2010/diversity",
- "clueweb09/catb/trec-web-2011",
- "clueweb09/catb/trec-web-2011/diversity",
- "clueweb09/catb/trec-web-2012",
- "clueweb09/catb/trec-web-2012/diversity",
- "clueweb09/en/trec-web-2009",
- "clueweb09/en/trec-web-2009/diversity",
- "clueweb09/en/trec-web-2010",
- "clueweb09/en/trec-web-2010/diversity",
- "clueweb09/en/trec-web-2011",
- "clueweb09/en/trec-web-2011/diversity",
- "clueweb09/en/trec-web-2012",
- "clueweb09/en/trec-web-2012/diversity",
- "clueweb09/trec-mq-2009",
- "clueweb12/b13/clef-ehealth",
- "clueweb12/b13/clef-ehealth/cs",
- "clueweb12/b13/clef-ehealth/de",
- "clueweb12/b13/clef-ehealth/fr",
- "clueweb12/b13/clef-ehealth/hu",
- "clueweb12/b13/clef-ehealth/pl",
- "clueweb12/b13/clef-ehealth/sv",
- "clueweb12/b13/ntcir-www-1",
- "clueweb12/b13/ntcir-www-2",
- "clueweb12/b13/trec-misinfo-2019",
- "clueweb12/touche-2020-task-2",
- "clueweb12/touche-2021-task-2",
- "clueweb12/touche-2022-task-2",
- "clueweb12/touche-2022-task-2/expanded-doc-t5-query",
- "clueweb12/trec-web-2013",
- "clueweb12/trec-web-2013/diversity",
- "clueweb12/trec-web-2014",
- "clueweb12/trec-web-2014/diversity",
- "codec",
- "codec/economics",
- "codec/history",
- "codec/politics",
- "codesearchnet/challenge",
- "codesearchnet/test",
- "codesearchnet/train",
- "codesearchnet/valid",
- "cord19/fulltext/trec-covid",
- "cord19/trec-covid",
- "cord19/trec-covid/round1",
- "cord19/trec-covid/round2",
- "cord19/trec-covid/round3",
- "cord19/trec-covid/round4",
- "cord19/trec-covid/round5",
- "cranfield",
- "disks45/nocr/trec-robust-2004",
- "disks45/nocr/trec-robust-2004/fold1",
- "disks45/nocr/trec-robust-2004/fold2",
- "disks45/nocr/trec-robust-2004/fold3",
- "disks45/nocr/trec-robust-2004/fold4",
- "disks45/nocr/trec-robust-2004/fold5",
- "disks45/nocr/trec7",
- "disks45/nocr/trec8",
- "dpr-w100/natural-questions/dev",
- "dpr-w100/natural-questions/train",
- "dpr-w100/trivia-qa/dev",
- "dpr-w100/trivia-qa/train",
- "gov/trec-web-2002",
- "gov/trec-web-2002/named-page",
- "gov/trec-web-2003",
- "gov/trec-web-2003/named-page",
- "gov/trec-web-2004",
- "gov2/trec-mq-2007",
- "gov2/trec-mq-2008",
- "gov2/trec-tb-2004",
- "gov2/trec-tb-2005",
- "gov2/trec-tb-2005/efficiency",
- "gov2/trec-tb-2005/named-page",
- "gov2/trec-tb-2006",
- "gov2/trec-tb-2006/efficiency",
- "gov2/trec-tb-2006/efficiency/stream3",
- "gov2/trec-tb-2006/named-page",
- "hc4/fa/dev",
- "hc4/fa/test",
- "hc4/fa/train",
- "hc4/ru/dev",
- "hc4/ru/test",
- "hc4/ru/train",
- "hc4/zh/dev",
- "hc4/zh/test",
- "hc4/zh/train",
- "highwire/trec-genomics-2006",
- "highwire/trec-genomics-2007",
- "istella22/test",
- "istella22/test/fold1",
- "istella22/test/fold2",
- "istella22/test/fold3",
- "istella22/test/fold4",
- "istella22/test/fold5",
- "kilt/codec",
- "kilt/codec/economics",
- "kilt/codec/history",
- "kilt/codec/politics",
- "lotte/lifestyle/dev/forum",
- "lotte/lifestyle/dev/search",
- "lotte/lifestyle/test/forum",
- "lotte/lifestyle/test/search",
- "lotte/pooled/dev/forum",
- "lotte/pooled/dev/search",
- "lotte/pooled/test/forum",
- "lotte/pooled/test/search",
- "lotte/recreation/dev/forum",
- "lotte/recreation/dev/search",
- "lotte/recreation/test/forum",
- "lotte/recreation/test/search",
- "lotte/science/dev/forum",
- "lotte/science/dev/search",
- "lotte/science/test/forum",
- "lotte/science/test/search",
- "lotte/technology/dev/forum",
- "lotte/technology/dev/search",
- "lotte/technology/test/forum",
- "lotte/technology/test/search",
- "lotte/writing/dev/forum",
- "lotte/writing/dev/search",
- "lotte/writing/test/forum",
- "lotte/writing/test/search",
- "medline/2004/trec-genomics-2004",
- "medline/2004/trec-genomics-2005",
- "medline/2017/trec-pm-2017",
- "medline/2017/trec-pm-2018",
- "mmarco/de/dev",
- "mmarco/de/dev/small",
- "mmarco/de/train",
- "mmarco/es/dev",
- "mmarco/es/dev/small",
- "mmarco/es/train",
- "mmarco/fr/dev",
- "mmarco/fr/dev/small",
- "mmarco/fr/train",
- "mmarco/id/dev",
- "mmarco/id/dev/small",
- "mmarco/id/train",
- "mmarco/it/dev",
- "mmarco/it/dev/small",
- "mmarco/it/train",
- "mmarco/pt/dev",
- "mmarco/pt/dev/small",
- "mmarco/pt/dev/small/v1.1",
- "mmarco/pt/dev/v1.1",
- "mmarco/pt/train",
- "mmarco/pt/train/v1.1",
- "mmarco/ru/dev",
- "mmarco/ru/dev/small",
- "mmarco/ru/train",
- "mmarco/v2/ar/dev",
- "mmarco/v2/ar/dev/small",
- "mmarco/v2/ar/train",
- "mmarco/v2/de/dev",
- "mmarco/v2/de/dev/small",
- "mmarco/v2/de/train",
- "mmarco/v2/dt/dev",
- "mmarco/v2/dt/dev/small",
- "mmarco/v2/dt/train",
- "mmarco/v2/es/dev",
- "mmarco/v2/es/dev/small",
- "mmarco/v2/es/train",
- "mmarco/v2/fr/dev",
- "mmarco/v2/fr/dev/small",
- "mmarco/v2/fr/train",
- "mmarco/v2/hi/dev",
- "mmarco/v2/hi/dev/small",
- "mmarco/v2/hi/train",
- "mmarco/v2/id/dev",
- "mmarco/v2/id/dev/small",
- "mmarco/v2/id/train",
- "mmarco/v2/it/dev",
- "mmarco/v2/it/dev/small",
- "mmarco/v2/it/train",
- "mmarco/v2/ja/dev",
- "mmarco/v2/ja/dev/small",
- "mmarco/v2/ja/train",
- "mmarco/v2/pt/dev",
- "mmarco/v2/pt/dev/small",
- "mmarco/v2/pt/train",
- "mmarco/v2/ru/dev",
- "mmarco/v2/ru/dev/small",
- "mmarco/v2/ru/train",
- "mmarco/v2/vi/dev",
- "mmarco/v2/vi/dev/small",
- "mmarco/v2/vi/train",
- "mmarco/v2/zh/dev",
- "mmarco/v2/zh/dev/small",
- "mmarco/v2/zh/train",
- "mmarco/zh/dev",
- "mmarco/zh/dev/small",
- "mmarco/zh/dev/small/v1.1",
- "mmarco/zh/dev/v1.1",
- "mmarco/zh/train",
- "mr-tydi/ar",
- "mr-tydi/ar/dev",
- "mr-tydi/ar/test",
- "mr-tydi/ar/train",
- "mr-tydi/bn",
- "mr-tydi/bn/dev",
- "mr-tydi/bn/test",
- "mr-tydi/bn/train",
- "mr-tydi/en",
- "mr-tydi/en/dev",
- "mr-tydi/en/test",
- "mr-tydi/en/train",
- "mr-tydi/fi",
- "mr-tydi/fi/dev",
- "mr-tydi/fi/test",
- "mr-tydi/fi/train",
- "mr-tydi/id",
- "mr-tydi/id/dev",
- "mr-tydi/id/test",
- "mr-tydi/id/train",
- "mr-tydi/ja",
- "mr-tydi/ja/dev",
- "mr-tydi/ja/test",
- "mr-tydi/ja/train",
- "mr-tydi/ko",
- "mr-tydi/ko/dev",
- "mr-tydi/ko/test",
- "mr-tydi/ko/train",
- "mr-tydi/ru",
- "mr-tydi/ru/dev",
- "mr-tydi/ru/test",
- "mr-tydi/ru/train",
- "mr-tydi/sw",
- "mr-tydi/sw/dev",
- "mr-tydi/sw/test",
- "mr-tydi/sw/train",
- "mr-tydi/te",
- "mr-tydi/te/dev",
- "mr-tydi/te/test",
- "mr-tydi/te/train",
- "mr-tydi/th",
- "mr-tydi/th/dev",
- "mr-tydi/th/test",
- "mr-tydi/th/train",
- "msmarco-document-v2/dev1",
- "msmarco-document-v2/dev2",
- "msmarco-document-v2/train",
- "msmarco-document-v2/trec-dl-2019",
- "msmarco-document-v2/trec-dl-2019/judged",
- "msmarco-document-v2/trec-dl-2020",
- "msmarco-document-v2/trec-dl-2020/judged",
- "msmarco-document-v2/trec-dl-2021",
- "msmarco-document-v2/trec-dl-2021/judged",
- "msmarco-document-v2/trec-dl-2022",
- "msmarco-document-v2/trec-dl-2022/judged",
- "msmarco-document/dev",
- "msmarco-document/orcas",
- "msmarco-document/train",
- "msmarco-document/trec-dl-2019",
- "msmarco-document/trec-dl-2019/judged",
- "msmarco-document/trec-dl-2020",
- "msmarco-document/trec-dl-2020/judged",
- "msmarco-document/trec-dl-hard",
- "msmarco-document/trec-dl-hard/fold1",
- "msmarco-document/trec-dl-hard/fold2",
- "msmarco-document/trec-dl-hard/fold3",
- "msmarco-document/trec-dl-hard/fold4",
- "msmarco-document/trec-dl-hard/fold5",
- "msmarco-passage-v2/dev1",
- "msmarco-passage-v2/dev2",
- "msmarco-passage-v2/train",
- "msmarco-passage-v2/trec-dl-2021",
- "msmarco-passage-v2/trec-dl-2021/judged",
- "msmarco-passage-v2/trec-dl-2022",
- "msmarco-passage-v2/trec-dl-2022/judged",
- "msmarco-passage/dev",
- "msmarco-passage/dev/2",
- "msmarco-passage/dev/judged",
- "msmarco-passage/dev/small",
- "msmarco-passage/train",
- "msmarco-passage/train/judged",
- "msmarco-passage/train/medical",
- "msmarco-passage/train/split200-train",
- "msmarco-passage/train/split200-valid",
- "msmarco-passage/train/triples-small",
- "msmarco-passage/train/triples-v2",
- "msmarco-passage/trec-dl-2019",
- "msmarco-passage/trec-dl-2019/judged",
- "msmarco-passage/trec-dl-2020",
- "msmarco-passage/trec-dl-2020/judged",
- "msmarco-passage/trec-dl-hard",
- "msmarco-passage/trec-dl-hard/fold1",
- "msmarco-passage/trec-dl-hard/fold2",
- "msmarco-passage/trec-dl-hard/fold3",
- "msmarco-passage/trec-dl-hard/fold4",
- "msmarco-passage/trec-dl-hard/fold5",
- "msmarco-qna/dev",
- "msmarco-qna/train",
- "natural-questions/dev",
- "natural-questions/train",
- "neuclir/1/fa/hc4-filtered",
- "neuclir/1/ru/hc4-filtered",
- "neuclir/1/zh/hc4-filtered",
- "neumarco/fa/dev",
- "neumarco/fa/dev/judged",
- "neumarco/fa/dev/small",
- "neumarco/fa/train",
- "neumarco/fa/train/judged",
- "neumarco/ru/dev",
- "neumarco/ru/dev/judged",
- "neumarco/ru/dev/small",
- "neumarco/ru/train",
- "neumarco/ru/train/judged",
- "neumarco/zh/dev",
- "neumarco/zh/dev/judged",
- "neumarco/zh/dev/small",
- "neumarco/zh/train",
- "neumarco/zh/train/judged",
- "nfcorpus/dev",
- "nfcorpus/dev/nontopic",
- "nfcorpus/dev/video",
- "nfcorpus/test",
- "nfcorpus/test/nontopic",
- "nfcorpus/test/video",
- "nfcorpus/train",
- "nfcorpus/train/nontopic",
- "nfcorpus/train/video",
- "nyt/trec-core-2017",
- "nyt/wksup",
- "nyt/wksup/train",
- "nyt/wksup/valid",
- "pmc/v1/trec-cds-2014",
- "pmc/v1/trec-cds-2015",
- "pmc/v2/trec-cds-2016",
- "sara",
- "touche-image/2022-06-13/touche-2022-task-3",
- "trec-arabic/ar2001",
- "trec-arabic/ar2002",
- "trec-cast/v0/train",
- "trec-cast/v0/train/judged",
- "trec-cast/v1/2019",
- "trec-cast/v1/2019/judged",
- "trec-cast/v1/2020",
- "trec-cast/v1/2020/judged",
- "trec-fair-2021/eval",
- "trec-fair-2021/train",
- "trec-fair/2021/eval",
- "trec-fair/2021/train",
- "trec-fair/2022/train",
- "trec-mandarin/trec5",
- "trec-mandarin/trec6",
- "trec-robust04",
- "trec-robust04/fold1",
- "trec-robust04/fold2",
- "trec-robust04/fold3",
- "trec-robust04/fold4",
- "trec-robust04/fold5",
- "trec-spanish/trec3",
- "trec-spanish/trec4",
- "trec-tot/2023/dev",
- "trec-tot/2023/train",
- "tripclick/train",
- "tripclick/train/head",
- "tripclick/train/head/dctr",
- "tripclick/train/hofstaetter-triples",
- "tripclick/train/tail",
- "tripclick/train/torso",
- "tripclick/val",
- "tripclick/val/head",
- "tripclick/val/head/dctr",
- "tripclick/val/tail",
- "tripclick/val/torso",
- "tweets2013-ia/trec-mb-2013",
- "tweets2013-ia/trec-mb-2014",
- "vaswani",
- "wapo/v2/trec-core-2018",
- "wapo/v2/trec-news-2018",
- "wapo/v2/trec-news-2019",
- "wikiclir/ar",
- "wikiclir/ca",
- "wikiclir/cs",
- "wikiclir/de",
- "wikiclir/en-simple",
- "wikiclir/es",
- "wikiclir/fi",
- "wikiclir/fr",
- "wikiclir/it",
- "wikiclir/ja",
- "wikiclir/ko",
- "wikiclir/nl",
- "wikiclir/nn",
- "wikiclir/no",
- "wikiclir/pl",
- "wikiclir/pt",
- "wikiclir/ro",
- "wikiclir/ru",
- "wikiclir/sv",
- "wikiclir/sw",
- "wikiclir/tl",
- "wikiclir/tr",
- "wikiclir/uk",
- "wikiclir/vi",
- "wikiclir/zh",
- "wikir/en1k/test",
- "wikir/en1k/training",
- "wikir/en1k/validation",
- "wikir/en59k/test",
- "wikir/en59k/training",
- "wikir/en59k/validation",
- "wikir/en78k/test",
- "wikir/en78k/training",
- "wikir/en78k/validation",
- "wikir/ens78k/test",
- "wikir/ens78k/training",
- "wikir/ens78k/validation",
- "wikir/es13k/test",
- "wikir/es13k/training",
- "wikir/es13k/validation",
- "wikir/fr14k/test",
- "wikir/fr14k/training",
- "wikir/fr14k/validation",
- "wikir/it16k/test",
- "wikir/it16k/training",
- "wikir/it16k/validation"
- ]
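Every entry in ir_dataset_names.json is an `ir_datasets` dataset ID. As a minimal sketch of how such an ID is consumed (assuming `ir_datasets` is installed; "msmarco-passage/dev/small" below is just an illustrative pick from the list, not a special case), loading a dataset and iterating its queries and qrels looks like this:

import ir_datasets

# Any ID from the list above can be passed to load().
dataset = ir_datasets.load("msmarco-passage/dev/small")

# Queries and relevance judgments are exposed as iterators of namedtuples.
for query in dataset.queries_iter():
    print(query.query_id, query.text)
    break

for qrel in dataset.qrels_iter():
    print(qrel.query_id, qrel.doc_id, qrel.relevance)
    break

Since the collector script further down kept only datasets that expose docs, queries, and qrels, all three iterators should be available for every ID in this file.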
local_datasets/codesearch_py/corpus.jsonl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:87ef61791e9aa9a9833e59e81756d41beaca8e4cd3efad2bb8940e5876f69008
- size 384365716
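These deleted local_datasets files are Git LFS pointer files rather than the data itself: three "key value" lines giving the spec version, a sha256 object ID, and the byte size of the real blob. A minimal sketch for reading those fields (the helper name is mine, not part of this repo):

# Hypothetical helper, not part of this repo: split a Git LFS pointer
# file into a dict of its "key value" fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:87ef61791e9aa9a9833e59e81756d41beaca8e4cd3efad2bb8940e5876f69008
size 384365716"""

info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # oid and size of corpus.jsonl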
local_datasets/codesearch_py/qrels/test.tsv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d172966a5e2dcc39491d446ca75ed730f7309d09701c131add14eb62b45c2114
- size 79309
local_datasets/codesearch_py/qrels/test.tsv.tmp DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ef54b582e47e48fdd094a3da00644bcf4af684b709be3f4f72f4de23c783ea50
- size 79283
local_datasets/codesearch_py/qrels/test.tsv.tmp.2 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:efda84b6d2b19a4bbd33ecd89616c88b63f4d585f7cb5ea10cc12372592306a3
- size 81283
local_datasets/codesearch_py/qrels/test.tsv.tmp.2.filtered DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ea798baa1ab23010a7769e60ba06e388d2b421cc2a9987b13900743df122a7c2
- size 24193
local_datasets/codesearch_py/queries.jsonl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:41e3f41fad388f4f612630bdb8ccb23b319b24a0b859db226a381b6f68b1771c
- size 199567
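Taken together, corpus.jsonl, queries.jsonl, and qrels/test.tsv follow the standard BEIR directory layout, so these local datasets were presumably read with BEIR's generic loader. A sketch, assuming beir==1.0.1 (pinned in the old requirements.txt) and that the LFS blobs have been pulled:

from beir.datasets.data_loader import GenericDataLoader

# Reads corpus.jsonl, queries.jsonl, and qrels/test.tsv from the folder.
corpus, queries, qrels = GenericDataLoader(
    data_folder="local_datasets/codesearch_py"
).load(split="test")

print(len(corpus), len(queries), len(qrels))

The same call with data_folder="local_datasets/gooaq_technical" should cover the other deleted dataset below.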
local_datasets/gooaq_technical/corpus.jsonl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:31282e5019461a6cd9d88a9e47fe6743d6962b3aeb81f5f5f78fa72eb52ff46b
- size 1399723
local_datasets/gooaq_technical/qrels/test.tsv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0b56de4bfec42225780cda2fc28fd7e0ee433f313208ab210de5bcf6281757ee
- size 49675
local_datasets/gooaq_technical/qrels/test.tsv.tmp DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:49982dbf8d1d182a75935718cb183b91d29e3ad4db1892723371c7d762955cbc
- size 49649
local_datasets/gooaq_technical/qrels/test.tsv.tmp.2 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5f0c2a26846e0456ddd24cd6d315ae516af28504e6b2961d00e0da0ff821f648
- size 51649
local_datasets/gooaq_technical/qrels/test.tsv.tmp.2.filtered DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e681ddae1619d30ce425fdb01ca4ceb10f493b079369ac0e555b1338cd3914e1
- size 15158
local_datasets/gooaq_technical/queries.jsonl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:25df519a5e39f0c91f6f7c5bdb74601cbfffbadd3cd1a998a8a5a48740b885be
- size 110860
requirements.txt CHANGED
@@ -1,11 +1,5 @@
- beir==1.0.1
  pandas==2.0.3
- pytrec_eval==0.5
  streamlit==1.24.1
- ir_datasets==0.5.5
- pyserini==0.21.0
- torch==2.0.1
  plotly==5.15.0
- captum==0.6.0
  protobuf==3.20.0
  beautifulsoup4==4.12.2
scripts/collect_ir_dataset_names.py DELETED
@@ -1,26 +0,0 @@
- import requests
- from bs4 import BeautifulSoup
- import re
- import json
- import os
- import pathlib
- import shutil
-
-
-
- def get_ir_dataset_names():
-     url = "https://raw.githubusercontent.com/allenai/ir_datasets/master/ir_datasets/etc/metadata.json"
-     # read in the json
-     with requests.get(url) as r:
-         data = json.loads(r.text)
-     names = []
-     for dataset in data:
-         if "docs" in data[dataset] and "queries" in data[dataset] and "qrels" in data[dataset]:
-             names.append(dataset)
-     return names
-
-
- if __name__ == "__main__":
-     names = get_ir_dataset_names()
-     with open("ir_dataset_names.json", "w") as fout:
-         json.dump(names, fout, indent=4)
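This deleted collector filtered the ir_datasets metadata down to datasets that ship docs, queries, and qrels, i.e. everything needed to run and score a retrieval experiment, and wrote the surviving IDs to ir_dataset_names.json. A usage sketch, assuming it is run from the repo root:

import json
import subprocess

# Regenerate the name list, then read it back as a flat JSON array.
subprocess.run(["python", "scripts/collect_ir_dataset_names.py"], check=True)
with open("ir_dataset_names.json") as fin:
    names = json.load(fin)
print(len(names), names[:3])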
test.tst DELETED
@@ -1,55 +0,0 @@
- "base",
- "antique",
- "aol_ia",
- "aquaint",
- "argsme",
- "beir",
- "c4",
- "car",
- "clinicaltrials",
- "clirmatrix",
- "clueweb09",
- "clueweb12",
- "codec",
- "cord19",
- "cranfield",
- "disks45",
- "dpr_w100",
- "codesearchnet",
- "gov",
- "gov2",
- "highwire",
- "istella22",
- "kilt",
- "lotte",
- "medline",
- "mmarco",
- "mr_tydi",
- "msmarco_document",
- "msmarco_document_v2",
- "msmarco_passage",
- "msmarco_passage_v2",
- "msmarco_qna",
- "neumarco",
- "nfcorpus",
- "natural_questions",
- "nyt",
- "pmc",
- "touche_image",
- "touche",
- "trec_arabic",
- "trec_mandarin",
- "trec_spanish",
- "trec_robust04",
- "trec_tot",
- "tripclick",
- "tweets2013_ia",
- "vaswani",
- "wapo",
- "wikiclir",
- "wikir",
- "trec_fair",
- "trec_cast",
- "hc4",
- "neuclir",
- "sara",