Dataset schema (each record below lists its fields in this order):

  id                    string    fixed length 6
  query_type            string    14 distinct values
  question              dict      key "string": the natural-language question
  paraphrased_question  sequence  list of paraphrase strings (may be empty)
  query                 dict      key "sparql": the corresponding SPARQL query
  template_id           string    8 distinct values
  query_shape           string    7 distinct values
  query_class           string    5 distinct values
  auto_generated        bool      2 classes
  number_of_patterns    int32     range 1 to 14
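The records that follow use this schema directly: question.string holds the natural-language question and query.sparql the corresponding SPARQL query. Below is a minimal sketch of how such records could be inspected, assuming they are exported locally as JSON Lines; the filename "questions.jsonl" is a placeholder, not part of this listing.

```python
import json
from collections import Counter

# Placeholder path -- the listing does not name a file; adjust as needed.
with open("questions.jsonl", encoding="utf-8") as fh:
    records = [json.loads(line) for line in fh]

# Tally records per SPARQL template (template_id is null for records that
# were not auto-generated) and print one question next to its query.
print(Counter(r["template_id"] for r in records))

sample = records[0]
print(sample["id"], sample["query_type"], sample["question"]["string"])
print(sample["query"]["sparql"])
```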
HQ0049
Non-Factoid/Count
{ "string": "What is the average energy generation of all energy sources considered?" }
[ "What is the mean value of energy generation for all energy sources in the studies?" ]
{ "sparql": "SELECT (AVG(?elec_gen_value) AS ?average_elec_gen_value)\nWHERE {\n orkgr:R153801 orkgp:compareContribution ?contrib.\n ?contrib orkgp:P43135 ?energy_sources.\n ?energy_sources rdfs:label ?energy_sources_labels;\n orkgp:P43134 ?electricity_generation.\n FILTER(REGEX(?energy_sources_labels, \"all sources\"))\n ?electricity_generation orkgp:HAS_VALUE ?value.\n BIND(xsd:float(?value) AS ?elec_gen_value)\n}" }
null
tree
WHICH-WHAT
false
5
AQ1464
Factoid
{ "string": "What is the name of the top performing model in terms of Accuracy score when benchmarked on the BoolQ dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BoolQ\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0456
Factoid
{ "string": "Provide a list of research paper titles and IDs that have benchmarked models on the CoQA dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CoQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1074
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the Amazon dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Amazon\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1660
Factoid
{ "string": "Which model has achieved the highest PARAMS score on the CINIC-10 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"PARAMS\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CINIC-10\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0774
Factoid
{ "string": "Can you list the metrics used to evaluate models on the WMT2016 Romanian-English dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 Romanian-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ2350
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the ApproxRepSet model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"ApproxRepSet\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1115
non-factoid
{ "string": "What is the top benchmark score and its metric on the WMT2016 Russian-English dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 Russian-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0504
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the ImageNet dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ImageNet\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2211
Factoid
{ "string": "List the code links in papers that use the Transformer-XL - 24 layers model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Transformer-XL - 24 layers\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1137
non-factoid
{ "string": "What is the top benchmark score and its metric on the DROP Test dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DROP Test\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1847
Factoid
{ "string": "What are the most commonly used benchmark datasets for the Phrase Extraction research field?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Phrase Extraction\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ0480
Factoid
{ "string": "Provide a list of research paper titles and IDs that have benchmarked models on the ARC (Easy) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ARC (Easy)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2057
Factoid
{ "string": "Where can I find code references in papers that have used the Cluster-Former (#C=512) model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Cluster-Former (#C=512)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1351
non-factoid
{ "string": "What is the top benchmark score and its metric on the iNaturalist 2018 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"iNaturalist 2018\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0201
Factoid
{ "string": "What models are being evaluated on the Ball in cup, catch (DMControl100k) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Ball in cup, catch (DMControl100k)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ0819
Factoid
{ "string": "What are the metrics of evaluation over the RotoWire dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"RotoWire\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ0841
Factoid
{ "string": "List the metrics that are used to evaluate models on the WikiText-2 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WikiText-2\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1096
non-factoid
{ "string": "What is the top benchmark score and its metric on the NYT24 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NYT24\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0600
Factoid
{ "string": "List the title and ID of research papers that contain a benchmark over the Atari 2600 Alien dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Alien\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2143
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the mLSTM + dynamic eval model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"mLSTM + dynamic eval\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1375
Factoid
{ "string": "Indicate the model that performed best in terms of Accuracy metric on the Penn Treebank benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Penn Treebank\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0346
Factoid
{ "string": "List the title and ID of research papers that contain a benchmark over the STEM-ECR v1.0 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"STEM-ECR v1.0\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1174
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the MedNLI dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MedNLI\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0467
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the QuAC dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"QuAC\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1965
Factoid
{ "string": "List the code links in papers that use the entity and relations table model model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"entity and relations table model\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1778
Factoid
{ "string": "What is the best performing model benchmarking the FGVC Aircraft dataset in terms of Top-1 metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Top-1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"FGVC Aircraft\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1627
Factoid
{ "string": "Indicate the model that performed best in terms of Accuracy metric on the MPQA benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MPQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0857
Factoid
{ "string": "Can you list the metrics used to evaluate models on the GENIA - UAS dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"GENIA - UAS\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ2035
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the Transformer Big + adversarial MLE model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Transformer Big + adversarial MLE\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0388
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the Amazon dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Amazon\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1322
non-factoid
{ "string": "What is the highest benchmark result achieved on the Yelp-2 dataset, including the metric and its value?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Yelp-2\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2341
Factoid
{ "string": "List the code links in papers that use the Sarsa-φ-EB model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Sarsa-φ-EB\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0935
Factoid
{ "string": "List the metrics that are used to evaluate models on the Atari 2600 Demon Attack benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Demon Attack\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1888
Factoid
{ "string": "Name the datasets that have been used for benchmarking in the Humor Detection research problem?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Humor Detection\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ0994
Factoid
{ "string": "What evaluation metrics are commonly used when benchmarking models on the Paper Field dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Paper Field\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1773
Factoid
{ "string": "Indicate the model that performed best in terms of F1 metric on the Reuters-21578 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Reuters-21578\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1305
non-factoid
{ "string": "What is the highest benchmark result achieved on the Atari 2600 Private Eye dataset, including the metric and its value?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Private Eye\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1842
Factoid
{ "string": "What are the most commonly used benchmark datasets for the Named entity recognition research field?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Named entity recognition\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ1342
non-factoid
{ "string": "What is the highest benchmark result achieved on the Classical music, 5 seconds at 12 kHz dataset, including the metric and its value?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Classical music, 5 seconds at 12 kHz\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0335
Factoid
{ "string": "Can you list the models that have been evaluated on the PWC Leaderboards (restricted) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"PWC Leaderboards (restricted)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ0845
Factoid
{ "string": "What evaluation metrics are commonly used when benchmarking models on the MedSTS dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MedSTS\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ0355
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the TDM Tagged Corpus dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"TDM Tagged Corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1268
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the Atari-57 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari-57\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2014
Factoid
{ "string": "Where can I find code references in papers that have used the SpERT (without overlap) model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SpERT (without overlap)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2038
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the SMT as posterior regularization model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SMT as posterior regularization\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0399
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the DuIE dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DuIE\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0216
Factoid
{ "string": "What are the models that have been benchmarked on the AAPD dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"AAPD\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ2067
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the MPCM model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"MPCM\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1820
Factoid
{ "string": "Which model has achieved the highest Macro Precision score on the PWC Leaderboards (restricted) benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Macro Precision\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"PWC Leaderboards (restricted)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1932
Factoid
{ "string": "Where can I find code references in papers that have used the ST-MoE model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"ST-MoE\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0703
Factoid
{ "string": "What evaluation metrics are commonly used when benchmarking models on the OA-STM dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"OA-STM\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1065
non-factoid
{ "string": "What is the top benchmark result (metric and value) over the dataset NLP-TDMS?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NLP-TDMS\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1611
Factoid
{ "string": "Which model has achieved the highest PARAMS score on the STL-10 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"PARAMS\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"STL-10\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1493
Factoid
{ "string": "What is the best performing model benchmarking the RotoWire (Relation Generation) dataset in terms of count metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"count\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"RotoWire (Relation Generation)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1064
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the DRI Corpus dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DRI Corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2266
Factoid
{ "string": "Where can I find code references in papers that have used the e2e-coref + ELMo model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"e2e-coref + ELMo\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1149
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the TriviaQA dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"TriviaQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1175
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the SNLI dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SNLI\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1863
Factoid
{ "string": "What are the most commonly used benchmark datasets for the Data-to-Text Generation research field?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Data-to-Text Generation\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ1460
Factoid
{ "string": "Indicate the model that performed best in terms of F1 (Short) metric on the Natural Questions benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1 (Short)\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Natural Questions\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0071
Factoid
{ "string": "What models are being evaluated on the CIFAR-10 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CIFAR-10\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ0685
Factoid
{ "string": "List the title and ID of research papers that contain a benchmark over the Open Entity dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Open Entity\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1604
Factoid
{ "string": "Which model has achieved the highest PARAMS score on the CIFAR-100 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"PARAMS\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CIFAR-100\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1508
Factoid
{ "string": "What is the best performing model benchmarking the RTE dataset in terms of Accuracy metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"RTE\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0733
Factoid
{ "string": "Can you list the metrics used to evaluate models on the Twitter dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Twitter\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1257
non-factoid
{ "string": "What is the top benchmark score and its metric on the Barabasi-Albert dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Barabasi-Albert\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1845
Factoid
{ "string": "Provide a list of benchmarked datasets related to the Scientific Claim Verification research area?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Scientific Claim Verification\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ1072
non-factoid
{ "string": "What is the top benchmark score and its metric on the nuScenes dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"nuScenes\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0438
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the Oxford 102 Flowers dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Oxford 102 Flowers\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1510
Factoid
{ "string": "What is the name of the top performing model in terms of Accuracy score when benchmarked on the CommitmentBank dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CommitmentBank\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1067
non-factoid
{ "string": "What is the highest benchmark result achieved on the Car speed in Liuliqiao District, Beijing dataset, including the metric and its value?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Car speed in Liuliqiao District, Beijing\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1711
Factoid
{ "string": "Indicate the model that performed best in terms of Score metric on the Atari 2600 Alien benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Alien\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1680
Factoid
{ "string": "Which model has achieved the highest Entropy Difference score on the Barabasi-Albert benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Entropy Difference\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Barabasi-Albert\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1173
non-factoid
{ "string": "What is the top benchmark result (metric and value) over the dataset ANLI test?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ANLI test\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0576
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the AESLC dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"AESLC\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2322
Factoid
{ "string": "Provide a list of papers that have utilized the LASER model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"LASER\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0144
Factoid
{ "string": "What are the models that have been benchmarked on the ANLI test dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ANLI test\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ2225
Factoid
{ "string": "Provide a list of papers that have utilized the He et al., 2017 + ELMo model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"He et al., 2017 + ELMo\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0674
Factoid
{ "string": "List the title and ID of research papers that contain a benchmark over the BUCC Russian-to-English dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BUCC Russian-to-English\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1118
non-factoid
{ "string": "What is the top benchmark score and its metric on the IWSLT2015 English-German dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"IWSLT2015 English-German\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2409
Factoid
{ "string": "Where can I find code references in papers that have used the EfficientNetV2-S (21k) model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"EfficientNetV2-S (21k)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1715
Factoid
{ "string": "What is the best performing model benchmarking the Atari 2600 Time Pilot dataset in terms of Score metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Time Pilot\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1269
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the Atari 2600 Ms. Pacman dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Ms. Pacman\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0100
Factoid
{ "string": "Could you provide a list of models that have been tested on the UCF101 (finetuned) benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"UCF101 (finetuned)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1145
non-factoid
{ "string": "What is the top benchmark score and its metric on the SQuAD1.1 dev dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SQuAD1.1 dev\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1395
Factoid
{ "string": "What is the best performing model benchmarking the ACE 2005 dataset in terms of RE Micro F1 metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"RE Micro F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ACE 2005\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1046
non-factoid
{ "string": "What is the top benchmark result (metric and value) over the dataset OA-STM?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"OA-STM\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0956
Factoid
{ "string": "Can you list the metrics used to evaluate models on the Atari 2600 Yars Revenge dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Yars Revenge\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ0608
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the Atari 2600 Kangaroo dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Kangaroo\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2464
Factoid
{ "string": "Can you list benchmarked problems in the area of Information Science?" }
[]
{ "sparql": "SELECT DISTINCT ?problem ?problem_lbl\nWHERE {\n ?rf a orkgc:ResearchField;\n rdfs:label ?rf_label.\n FILTER (str(?rf_label) = \"Information Science\")\n ?paper orkgp:P30 ?rf;\n orkgp:P31 ?cont.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n ?problem rdfs:label ?problem_lbl.\n}" }
T08
Tree
WHICH-WHAT
true
5
AQ0535
Factoid
{ "string": "List the title and ID of research papers that contain a benchmark over the Ball in cup, catch (DMControl500k) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Ball in cup, catch (DMControl500k)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1552
Factoid
{ "string": "Which model has achieved the highest F1 score on the GENIA - UAS benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"GENIA - UAS\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1958
Factoid
{ "string": "List the code links in papers that use the word BiLSTM + char CNN + CRF model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"word BiLSTM + char CNN + CRF\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1974
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the SciKG model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SciKG\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0005
Factoid
{ "string": "Can you list the models that have been evaluated on the SciREX dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SciREX\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ0275
Factoid
{ "string": "What are the models that have been benchmarked on the Atari 2600 Frostbite dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Frostbite\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ2332
Factoid
{ "string": "Provide a list of papers that have utilized the DARQN hard model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"DARQN hard\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1499
Factoid
{ "string": "Which model has achieved the highest Accuracy score on the OpenBookQA benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"OpenBookQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1696
Factoid
{ "string": "What is the name of the top performing model in terms of Score score when benchmarked on the Atari 2600 Pong dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Pong\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
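The stored queries use the ORKG namespaces (orkgc:, orkgp:, orkgr:) together with rdfs: and xsd: without declaring any PREFIX lines, so a client has to prepend them before execution. The following is a hedged sketch of running one of the queries above; the endpoint URL and namespace IRIs are assumptions based on common ORKG usage and should be checked against current ORKG documentation.

```python
import requests

# Assumptions (not stated in the listing above): the public ORKG SPARQL
# endpoint URL and the namespace IRIs below follow common ORKG usage.
ENDPOINT = "https://orkg.org/triplestore"
PREFIXES = """\
PREFIX orkgr: <http://orkg.org/orkg/resource/>
PREFIX orkgc: <http://orkg.org/orkg/class/>
PREFIX orkgp: <http://orkg.org/orkg/predicate/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
"""

def run_query(sparql):
    # The stored queries omit PREFIX declarations, so prepend them here.
    response = requests.post(
        ENDPOINT,
        data={"query": PREFIXES + sparql},
        headers={"Accept": "application/sparql-results+json"},
        timeout=60,
    )
    response.raise_for_status()
    return response.json()["results"]["bindings"]

# Example: the T02 query of record AQ0456 (papers that benchmark on CoQA).
coqa_query = """
SELECT DISTINCT ?paper ?paper_lbl
WHERE {
  ?dataset a orkgc:Dataset;
           rdfs:label ?dataset_lbl.
  FILTER (str(?dataset_lbl) = "CoQA")
  ?benchmark orkgp:HAS_DATASET ?dataset.
  ?cont orkgp:HAS_BENCHMARK ?benchmark.
  ?paper orkgp:P31 ?cont;
         rdfs:label ?paper_lbl.
}
"""

for row in run_query(coqa_query):
    print(row["paper"]["value"], row["paper_lbl"]["value"])
```

The same helper applies unchanged to any query.sparql value in the records above, since they all rely on the same undeclared prefixes.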