{
"default": {
"description": "MedMCQA is a large-scale, Multiple-Choice Question Answering (MCQA) dataset designed to address real-world medical entrance exam questions.\nMedMCQA has more than 194k high-quality AIIMS & NEET PG entrance exam MCQs covering 2.4k healthcare topics and 21 medical subjects are collected with an average token length of 12.77 and high topical diversity.\nThe dataset contains questions about the following topics: Anesthesia, Anatomy, Biochemistry, Dental, ENT, Forensic Medicine (FM)\nObstetrics and Gynecology (O&G), Medicine, Microbiology, Ophthalmology, Orthopedics Pathology, Pediatrics, Pharmacology, Physiology,\nPsychiatry, Radiology Skin, Preventive & Social Medicine (PSM) and Surgery\n",
"citation": "@InProceedings{pmlr-v174-pal22a,\n title = \t {MedMCQA: A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering},\n author = {Pal, Ankit and Umapathi, Logesh Kumar and Sankarasubbu, Malaikannan},\n booktitle = \t {Proceedings of the Conference on Health, Inference, and Learning},\n pages = \t {248--260},\n year = \t {2022},\n editor = \t {Flores, Gerardo and Chen, George H and Pollard, Tom and Ho, Joyce C and Naumann, Tristan},\n volume = \t {174},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {07--08 Apr},\n publisher = {PMLR},\n pdf = \t {https://proceedings.mlr.press/v174/pal22a/pal22a.pdf},\n url = \t {https://proceedings.mlr.press/v174/pal22a.html},\n abstract = \t {This paper introduces MedMCQA, a new large-scale, Multiple-Choice Question Answering (MCQA) dataset designed to address real-world medical entrance exam questions. More than 194k high-quality AIIMS & NEET PG entrance exam MCQs covering 2.4k healthcare topics and 21 medical subjects are collected with an average token length of 12.77 and high topical diversity. Each sample contains a question, correct answer(s), and other options which requires a deeper language understanding as it tests the 10+ reasoning abilities of a model across a wide range of medical subjects & topics. A detailed explanation of the solution, along with the above information, is provided in this study.}\n}\n",
"homepage": "https://medmcqa.github.io",
"license": "Apache License 2.0",
"features": {
"id": {
"dtype": "string",
"_type": "Value"
},
"question": {
"dtype": "string",
"_type": "Value"
},
"opa": {
"dtype": "string",
"_type": "Value"
},
"opb": {
"dtype": "string",
"_type": "Value"
},
"opc": {
"dtype": "string",
"_type": "Value"
},
"opd": {
"dtype": "string",
"_type": "Value"
},
"cop": {
"names": [
"a",
"b",
"c",
"d"
],
"_type": "ClassLabel"
},
"choice_type": {
"dtype": "string",
"_type": "Value"
},
"exp": {
"dtype": "string",
"_type": "Value"
},
"subject_name": {
"dtype": "string",
"_type": "Value"
},
"topic_name": {
"dtype": "string",
"_type": "Value"
}
},
"builder_name": "medmcqa",
"dataset_name": "medmcqa",
"config_name": "default",
"version": {
"version_str": "1.1.0",
"major": 1,
"minor": 1,
"patch": 0
},
"splits": {
"train": {
"name": "train",
"num_bytes": 131903297,
"num_examples": 182822,
"dataset_name": null
},
"test": {
"name": "test",
"num_bytes": 1399350,
"num_examples": 6150,
"dataset_name": null
},
"validation": {
"name": "validation",
"num_bytes": 2221428,
"num_examples": 4183,
"dataset_name": null
}
},
"download_size": 88311487,
"dataset_size": 135524075,
"size_in_bytes": 223835562
}
}
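
For reference, here is a minimal sketch of how the schema above maps onto actual use with the Hugging Face `datasets` library. The repo id "medmcqa" is taken from the `dataset_name` field, and the field and split names come from the `features` and `splits` blocks; verify the id against the Hub before relying on it, since dataset repos occasionally move.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub
# under the id "medmcqa" (matching the dataset_name field above).
from datasets import load_dataset

# The splits block above defines train / test / validation.
ds = load_dataset("medmcqa", split="validation")

example = ds[0]
options = [example["opa"], example["opb"], example["opc"], example["opd"]]

# "cop" is a ClassLabel over ["a", "b", "c", "d"], stored as an integer
# index; int2str() recovers the option letter.
answer = ds.features["cop"].int2str(example["cop"])

print(example["question"])
for letter, text in zip("abcd", options):
    marker = "->" if letter == answer else "  "
    print(f"{marker} ({letter}) {text}")
```

The four option columns (`opa`..`opd`) are kept as separate string features rather than a list, so the zip over "abcd" above is just one convenient way to pair them back up with the `cop` label.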