Tasks: Question Answering
Modalities: Text
Languages: English
Size: 1K - 10K
Tags: knowledge-base-qa
License: cc-by-4.0
Dataset Card for SciQA
Dataset Summary
SciQA contains 2,565 SPARQL query–question pairs, along with answers fetched from the Open Research Knowledge Graph (ORKG) via a Virtuoso SPARQL endpoint. It is a collection of both handcrafted and auto-generated questions and queries. The dataset is split into 70% training, 10% validation, and 20% test examples.
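The snippet below is a minimal sketch of loading the benchmark with the Hugging Face datasets library. The repository id "orkg/SciQA" is an assumption; substitute the id shown at the top of this page if it differs.

# Minimal loading sketch. The repository id "orkg/SciQA" is an
# assumption; replace it with this dataset card's id if needed.
from datasets import load_dataset

sciqa = load_dataset("orkg/SciQA")
print(sciqa)  # DatasetDict with train / validation / test splits

example = sciqa["train"][0]
print(example["question"]["string"])  # the natural-language question
print(example["query"]["sparql"])     # the gold SPARQL query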
Dataset Structure
Data Instances
An example of a question is given below:
{
"id": "AQ2251",
"query_type": "Factoid",
"question": {
"string": "Provide a list of papers that have utilized the Depth DDPPO model and include the links to their code?"
},
"paraphrased_question": [],
"query": {
"sparql": "SELECT DISTINCT ?code\nWHERE {\n ?model a orkgc:Model;\n rdfs:label ?model_lbl.\n FILTER (str(?model_lbl) = \"Depth DDPPO\")\n ?benchmark orkgp:HAS_DATASET ?dataset.\n ?cont orkgp:HAS_BENCHMARK ?benchmark.\n ?cont orkgp:HAS_MODEL ?model;\n orkgp:HAS_SOURCE_CODE ?code.\n}"
},
"template_id": "T07",
"auto_generated": true,
"query_shape": "Tree",
"query_class": "WHICH-WHAT",
"number_of_patterns": 4,
}
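Since every instance pairs a question with an executable SPARQL query, the queries can be run directly against the ORKG triple store. The sketch below uses SPARQLWrapper; the endpoint URL and the orkgc/orkgp/orkgr namespace IRIs are assumptions based on common ORKG usage, so verify them against the ORKG documentation before relying on the results.

# Sketch: executing a SciQA query against the ORKG SPARQL endpoint.
# The endpoint URL and the namespace IRIs below are assumptions;
# check the ORKG documentation for the authoritative values.
from SPARQLWrapper import SPARQLWrapper, JSON

PREFIXES = """
PREFIX orkgc: <http://orkg.org/orkg/class/>
PREFIX orkgp: <http://orkg.org/orkg/predicate/>
PREFIX orkgr: <http://orkg.org/orkg/resource/>
PREFIX rdfs:  <http://www.w3.org/2000/01/rdf-schema#>
"""

def run_query(sparql_body, endpoint="https://orkg.org/triplestore"):
    client = SPARQLWrapper(endpoint)
    client.setQuery(PREFIXES + sparql_body)
    client.setReturnFormat(JSON)
    return client.query().convert()["results"]["bindings"]

# `example` comes from the loading sketch in the Dataset Summary.
for row in run_query(example["query"]["sparql"]):
    print({k: v["value"] for k, v in row.items()})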
Data Fields
id: the id of the question
question: a string containing the question
paraphrased_question: a set of paraphrased versions of the question
query: a SPARQL query that answers the question
query_type: the type of the query
template_id: an optional id of the template the query was generated from
query_shape: a string indicating the shape of the query
query_class: a string indicating the class of the query
auto_generated: a boolean indicating whether the question is auto-generated or not
number_of_patterns: an integer indicating the number of graph patterns in the query
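These fields can also be inspected programmatically, continuing the loading sketch above:

print(sciqa["train"].features)  # column names and types of the split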
Data Splits
The dataset is split into 70% training, 10% validation and 20% test questions.
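The split sizes can be verified directly against the loaded DatasetDict:

for split in ("train", "validation", "test"):
    print(split, len(sciqa[split]))  # roughly 70% / 10% / 20% of 2,565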
Additional Information
Licensing Information
SciQA is licensed under the Creative Commons Attribution 4.0 International License (CC BY 4.0).
Citation Information
@Article{SciQA2023,
author={Auer, S{\"o}ren
and Barone, Dante A. C.
and Bartz, Cassiano
and Cortes, Eduardo G.
and Jaradeh, Mohamad Yaser
and Karras, Oliver
and Koubarakis, Manolis
and Mouromtsev, Dmitry
and Pliukhin, Dmitrii
and Radyush, Daniil
and Shilin, Ivan
and Stocker, Markus
and Tsalapati, Eleni},
title={The SciQA Scientific Question Answering Benchmark for Scholarly Knowledge},
journal={Scientific Reports},
year={2023},
month={May},
day={04},
volume={13},
number={1},
pages={7240},
abstract={Knowledge graphs have gained increasing popularity in the last decade in science and technology. However, knowledge graphs are currently relatively simple to moderate semantic structures that are mainly a collection of factual statements. Question answering (QA) benchmarks and systems were so far mainly geared towards encyclopedic knowledge graphs such as DBpedia and Wikidata. We present SciQA a scientific QA benchmark for scholarly knowledge. The benchmark leverages the Open Research Knowledge Graph (ORKG) which includes almost 170,000 resources describing research contributions of almost 15,000 scholarly articles from 709 research fields. Following a bottom-up methodology, we first manually developed a set of 100 complex questions that can be answered using this knowledge graph. Furthermore, we devised eight question templates with which we automatically generated further 2465 questions, that can also be answered with the ORKG. The questions cover a range of research fields and question types and are translated into corresponding SPARQL queries over the ORKG. Based on two preliminary evaluations, we show that the resulting SciQA benchmark represents a challenging task for next-generation QA systems. This task is part of the open competitions at the 22nd International Semantic Web Conference 2023 as the Scholarly Question Answering over Linked Data (QALD) Challenge.},
issn={2045-2322},
doi={10.1038/s41598-023-33607-z},
url={https://doi.org/10.1038/s41598-023-33607-z}
}
Contributions
Thanks to @YaserJaradeh for adding this dataset.