id
stringlengths
6
6
query_type
stringclasses
14 values
question
dict
paraphrased_question
sequence
query
dict
template_id
stringclasses
8 values
query_shape
stringclasses
7 values
query_class
stringclasses
5 values
auto_generated
bool
2 classes
number_of_patterns
int32
1
14
AQ0022
Factoid
{ "string": "Can you list the models that have been evaluated on the ACL-ARC dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ACL-ARC\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ2445
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the BiT-S (ResNet-152x4) model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"BiT-S (ResNet-152x4)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0118
Factoid
{ "string": "Can you list the models that have been evaluated on the Quora Question Pairs dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Quora Question Pairs\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1581
Factoid
{ "string": "Indicate the model that performed best in terms of Error metric on the DBpedia benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Error\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DBpedia\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0428
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the WMT2016 English-German dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 English-German\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2450
Factoid
{ "string": "List the code links in papers that use the XLNet Large Cased model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"XLNet Large Cased\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2112
Factoid
{ "string": "List the code links in papers that use the Transformer-XL (24 layers, RMS dynamic eval, decay) model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Transformer-XL (24 layers, RMS dynamic eval, decay)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0415
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the CIFAR-100 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CIFAR-100\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0588
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the Atari 2600 Atlantis dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Atlantis\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0440
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the Multimodal PISA dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Multimodal PISA\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1108
non-factoid
{ "string": "What is the top benchmark score and its metric on the WMT2016 English-Romanian dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 English-Romanian\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2129
Factoid
{ "string": "List the code links in papers that use the AWD-LSTM (3 layers) model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"AWD-LSTM (3 layers)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1496
Factoid
{ "string": "Indicate the model that performed best in terms of BLEU metric on the RotoWire benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"BLEU\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"RotoWire\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ2412
Factoid
{ "string": "Where can I find code references in papers that have used the CAIT-XS-24 model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CAIT-XS-24\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0944
Factoid
{ "string": "What evaluation metrics are commonly used when benchmarking models on the Atari 2600 Enduro dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Enduro\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ0471
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the SearchQA dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SearchQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
HQ0028
Non-factoid
{ "string": "Which indicators for well-being are used in the studies on the effect of COVID-19?" }
[ "Which indicators for wellbeing are used in the study \"The effect of the COVID19 pandemic on wellbeing\"?" ]
{ "sparql": "SELECT DISTINCT ?indicators ?indicators_labels\nWHERE {\n orkgr:R78492 orkgp:compareContribution ?contrib.\n ?contrib orkgp:P36089 ?indicators.\n ?indicators rdfs:label ?indicators_labels.\n}" }
null
chain
WHICH-WHAT
false
3
AQ1921
Factoid
{ "string": "Where can I find code references in papers that have used the Linformer model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Linformer\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
HQ0083
Factoid
{ "string": "What is the amount of questions for LC-QuAD 2.0 dataset?" }
[ "How many questions does LCQuAD 2.0 dataset contain?" ]
{ "sparql": "SELECT ?number_of_questions\nWHERE {\n orkgr:R154290 orkgp:P31 ?contrib.\n ?contrib orkgp:P41923 ?number_of_questions.\n}" }
null
chain
WHICH-WHAT
false
2
AQ2119
Factoid
{ "string": "Provide a list of papers that have utilized the Transformer-XL (24 layers) model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Transformer-XL (24 layers)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1384
Factoid
{ "string": "What is the best performing model benchmarking the DuIE dataset in terms of F1 metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DuIE\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ2330
Factoid
{ "string": "Where can I find code references in papers that have used the Go-Explore model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Go-Explore\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1407
Factoid
{ "string": "What is the name of the top performing model in terms of RE+ Macro F1 score when benchmarked on the ADE Corpus dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"RE+ Macro F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ADE Corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ2192
Factoid
{ "string": "Provide a list of papers that have utilized the Adaptive Input Very Large model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Adaptive Input Very Large\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2028
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the CMLM+LAT+1 iterations model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CMLM+LAT+1 iterations\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1326
non-factoid
{ "string": "What is the top benchmark score and its metric on the BBCSport dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BBCSport\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1645
Factoid
{ "string": "Which model has achieved the highest SUCCESS score on the Habitat 2020 Point Nav test-std benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"SUCCESS\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Habitat 2020 Point Nav test-std\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0265
Factoid
{ "string": "Can you list the models that have been evaluated on the Atari 2600 Kangaroo dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Kangaroo\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1303
non-factoid
{ "string": "What is the top benchmark score and its metric on the Atari 2600 Montezuma's Revenge dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Montezuma\\'s Revenge\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2166
Factoid
{ "string": "Where can I find code references in papers that have used the Inan et al. (2016) - Variational RHN model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Inan et al. (2016) - Variational RHN\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2358
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the BiT-L model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"BiT-L\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1480
Factoid
{ "string": "Indicate the model that performed best in terms of ROUGE-2 metric on the CNN / Daily Mail benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"ROUGE-2\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CNN / Daily Mail\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0246
Factoid
{ "string": "Can you list the models that have been evaluated on the Atari 2600 Berzerk dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Berzerk\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1694
Factoid
{ "string": "Which model has achieved the highest Medium Human-Normalized Score score on the Atari-57 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Medium Human-Normalized Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari-57\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0593
Factoid
{ "string": "Provide a list of research paper titles and IDs that have benchmarked models on the Atari 2600 River Raid dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 River Raid\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0524
Factoid
{ "string": "List the title and ID of research papers that contain a benchmark over the SST-2 Binary classification dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SST-2 Binary classification\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2396
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the DeiT-S model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"DeiT-S\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1658
Factoid
{ "string": "What is the name of the top performing model in terms of Accuracy (%) score when benchmarked on the CINIC-10 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy (%)\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CINIC-10\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1824
Factoid
{ "string": "Indicate the model that performed best in terms of Macro F1 metric on the NLP-TDMS (Exp, arXiv only) benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Macro F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NLP-TDMS (Exp, arXiv only)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
HQ0065
Factoid/Superlative
{ "string": "What is the maximum egg mass in studies?" }
[ "What is the highest mass of embryo eggs mentioned in the studies?" ]
{ "sparql": "SELECT (MAX(?egg_masses_float) AS ?max_egg_mass)\nWHERE {\n orkgr:R34845 orkgp:compareContribution ?contrib.\n ?contrib orkgp:P15692 ?egg_masses.\n BIND(xsd:float(?egg_masses) AS ?egg_masses_float)\n}" }
null
chain
WHICH-WHAT
false
2
AQ0016
Factoid
{ "string": "Can you list the models that have been evaluated on the ScienceIE dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ScienceIE\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ0113
Factoid
{ "string": "What models are being evaluated on the CoQA dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CoQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ0810
Factoid
{ "string": "What evaluation metrics are commonly used when benchmarking models on the QuAC dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"QuAC\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ2001
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the multi-head model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"multi-head\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2125
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the Transformer-XL (18 layers) model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Transformer-XL (18 layers)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0632
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the Atari 2600 Gravitar dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Gravitar\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0997
Factoid
{ "string": "What evaluation metrics are commonly used when benchmarking models on the ACL-ARC dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ACL-ARC\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ0581
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the Atari 2600 Double Dunk dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Double Dunk\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0228
Factoid
{ "string": "What models are being evaluated on the Barabasi-Albert dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Barabasi-Albert\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1957
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the SciGEN model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SciGEN\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1461
Factoid
{ "string": "What is the name of the top performing model in terms of Accuracy score when benchmarked on the BioASQ dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BioASQ\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1832
Factoid
{ "string": "Indicate the model that performed best in terms of Accuracy metric on the PROTEINS benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"PROTEINS\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1050
non-factoid
{ "string": "What is the top benchmark result (metric and value) over the dataset TDMSci?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"TDMSci\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0442
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the HMDB51 (finetuned) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"HMDB51 (finetuned)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1396
Factoid
{ "string": "What is the best performing model benchmarking the ACE 2005 dataset in terms of Sentence Encoder metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Sentence Encoder\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ACE 2005\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0849
Factoid
{ "string": "Can you list the metrics used to evaluate models on the Jacquard dataset dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \" Jacquard dataset\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1525
Factoid
{ "string": "What is the name of the top performing model in terms of Number of params score when benchmarked on the Hutter Prize dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Number of params\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Hutter Prize\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1996
Factoid
{ "string": "Provide a list of papers that have utilized the SciBERT (SciVocab) model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SciBERT (SciVocab)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1350
non-factoid
{ "string": "What is the top benchmark result (metric and value) over the dataset Flowers-102?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Flowers-102\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0908
Factoid
{ "string": "What are the metrics of evaluation over the MLDoc Zero-Shot English-to-Spanish dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MLDoc Zero-Shot English-to-Spanish\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1601
Factoid
{ "string": "What is the best performing model benchmarking the CIFAR-10 dataset in terms of FLOPS metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"FLOPS\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CIFAR-10\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1948
Factoid
{ "string": "Provide a list of papers that have utilized the DyGIE++ + OpenIE + Stanford Core NLP PoS tagger enriched by consistent triples model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"DyGIE++ + OpenIE + Stanford Core NLP PoS tagger enriched by consistent triples\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2190
Factoid
{ "string": "Where can I find code references in papers that have used the Neural cache model (size = 100) model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Neural cache model (size = 100)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1675
Factoid
{ "string": "What is the name of the top performing model in terms of Accuracy score when benchmarked on the MLDoc Zero-Shot English-to-Spanish dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Accuracy\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"MLDoc Zero-Shot English-to-Spanish\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0319
Factoid
{ "string": "Can you list the models that have been evaluated on the ObjectNet (Bounding Box) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ObjectNet (Bounding Box)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1184
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the WikiText-2 dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WikiText-2\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0174
Factoid
{ "string": "Can you list the models that have been evaluated on the DCASE dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DCASE\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1389
Factoid
{ "string": "Which model has achieved the highest F1 score on the ACE 2004 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"ACE 2004\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1902
Factoid
{ "string": "Provide a list of benchmarked datasets related to the Document Classification research area?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Document Classification\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ0758
Factoid
{ "string": "List the metrics that are used to evaluate models on the CIFAR-100 benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"CIFAR-100\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ2404
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the CAIT-M-24 model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CAIT-M-24\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1867
Factoid
{ "string": "Name the datasets that have been used for benchmarking in the Language Modelling research problem?" }
[]
{ "sparql": "SELECT DISTINCT ?dataset ?dataset_lbl\nWHERE {\n ?problem a orkgc:Problem;\n rdfs:label ?problem_lbl. \n FILTER (str(?problem_lbl) = \"Language Modelling\")\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:P32 ?problem.\n}" }
T06
Tree
WHICH-WHAT
true
5
AQ1146
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the WebQuestions dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WebQuestions\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0860
Factoid
{ "string": "What are the metrics of evaluation over the DCASE dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DCASE\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ0771
Factoid
{ "string": "List the metrics that are used to evaluate models on the WMT2016 English-German benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"WMT2016 English-German\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
HQ0074
Factoid/Ranking
{ "string": "For what product minimum conversion was obtained?" }
[ "What product in the studies is characterized by the lowest value of conversion?" ]
{ "sparql": "SELECT ?product, ?product_label\nWHERE {\n orkgr:R155272 orkgp:compareContribution ?contrib.\n ?contrib orkgp:P43149 ?product;\n orkgp:P43148 ?conversion.\n ?product rdfs:label ?product_label.\n ?conversion rdfs:label ?conversion_label.\n}\nORDER BY ASC(xsd:float(?conversion_label))\nLIMIT 1" }
null
Tree
WHICH-WHAT
false
5
AQ1218
non-factoid
{ "string": "What is the highest benchmark result achieved on the BC5CDR dataset, including the metric and its value?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"BC5CDR\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ0063
Factoid
{ "string": "Can you list the models that have been evaluated on the NYT-single dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NYT-single\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ2366
Factoid
{ "string": "List the code links in papers that use the CeiT-T (384 finetune resolution) model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CeiT-T (384 finetune resolution)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1756
Factoid
{ "string": "Which model has achieved the highest Score score on the Atari 2600 Star Gunner benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Star Gunner\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1548
Factoid
{ "string": "Indicate the model that performed best in terms of SICK-E metric on the SentEval benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"SICK-E\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SentEval\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ1368
non-factoid
{ "string": "Can you provide the highest benchmark result, including the metric and score, for the PROTEINS dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"PROTEINS\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ2023
Factoid
{ "string": "Where can I find code references in papers that have used the Unsupervised PBSMT model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Unsupervised PBSMT\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ2019
Factoid
{ "string": "Where can I find code references in papers that have used the SciIE model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SciIE\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0719
Factoid
{ "string": "What are the metrics of evaluation over the Automatically labeled Medline abstracts corpus dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Automatically labeled Medline abstracts corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ2108
Factoid
{ "string": "List the code links in papers that use the XLNet (Large) model in any benchmark?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"XLNet (Large)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1313
non-factoid
{ "string": "What is the top benchmark score and its metric on the Atari 2600 HERO dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 HERO\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1491
Factoid
{ "string": "What is the name of the top performing model in terms of Recall score when benchmarked on the Rotowire (Content Selection) dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Recall\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Rotowire (Content Selection)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0370
Factoid
{ "string": "What are the titles and IDs of research papers that include a benchmark for the SciGEN dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"SciGEN\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ2402
Factoid
{ "string": "Provide a list of papers that have utilized the CAIT-M-36 model and include the links to their code?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"CAIT-M-36\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ1473
Factoid
{ "string": "What is the best performing model benchmarking the TriviaQA dataset in terms of F1 metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"F1\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"TriviaQA\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0054
Factoid
{ "string": "What are the models that have been benchmarked on the NYT dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"NYT\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?paper orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?paper orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n}" }
T01
Tree
WHICH-WHAT
true
6
AQ1669
Factoid
{ "string": "What is the best performing model benchmarking the DTD dataset in terms of PARAMS metric?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"PARAMS\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"DTD\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ0464
Factoid
{ "string": "Give me a list of research papers along with their titles and IDs, that have performed benchmarks on the Story Cloze Test dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Story Cloze Test\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ0571
Factoid
{ "string": "Provide a list of research paper titles and IDs that have benchmarked models on the Barabasi-Albert dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?paper ?paper_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Barabasi-Albert\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?paper orkgp:P31 ?cont;\n rdfs:label ?paper_lbl.\n}" }
T02
Tree
WHICH-WHAT
true
5
AQ1695
Factoid
{ "string": "Which model has achieved the highest Score score on the Atari 2600 Ms. Pacman benchmark dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?model ?model_lbl\nWHERE {\n ?metric a orkgc:Metric;\n rdfs:label ?metric_lbl.\n FILTER (str(?metric_lbl) = \"Score\")\n {\n SELECT ?model ?model_lbl\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Atari 2600 Ms. Pacman\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value;\n orkgp:HAS_METRIC ?metric.\n ?cont orkgp:HAS_BENCHMARK ?benchmark;\n orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.\n }\n ORDER BY DESC(?value)\n LIMIT 1\n }\n}" }
T05
Tree
WHICH-WHAT
true
12
AQ2242
Factoid
{ "string": "Where can I find code references in papers that have used the SciBERT (active learning) model for benchmarking purposes?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"SciBERT (active learning)\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4
AQ0718
Factoid
{ "string": "What are the metrics of evaluation over the Annotated development corpus dataset?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl\nWHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"Annotated development corpus\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n}" }
T03
Tree
WHICH-WHAT
true
6
AQ1161
non-factoid
{ "string": "What is the highest benchmark result achieved on the RotoWire (Content Ordering) dataset, including the metric and its value?" }
[]
{ "sparql": "SELECT DISTINCT ?metric ?metric_lbl (MAX(?value) AS ?score)\nWHERE {\n {\n SELECT ?metric ?metric_lbl ?value\n WHERE {\n ?dataset a orkgc:Dataset;\n rdfs:label ?dataset_lbl.\n FILTER (str(?dataset_lbl) = \"RotoWire (Content Ordering)\")\n ?benchmark orkgp:HAS_DATASET ?dataset;\n orkgp:HAS_EVALUATION ?eval.\n ?eval orkgp:HAS_VALUE ?value.\n OPTIONAL {?eval orkgp:HAS_METRIC ?metric.\n ?metric rdfs:label ?metric_lbl.}\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n OPTIONAL {?cont orkgp:HAS_MODEL ?model.\n ?model rdfs:label ?model_lbl.}\n }\n ORDER BY DESC(?value)\n }\n}\nGROUP BY ?metric ?metric_lbl" }
T04
Tree
WHICH-WHAT
true
13
AQ1941
Factoid
{ "string": "Can you provide links to code used in papers that benchmark the DocTAET-TDM model?" }
[]
{ "sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"DocTAET-TDM\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}" }
T07
Tree
WHICH-WHAT
true
4